Instructions¶

Adjust the root folders first

Setup environment¶

Google colab¶

In [ ]:
# Mount Google Drive so the project folder is reachable from Colab.
from google.colab import drive
drive.mount('/content/drive')
# Project root when running on Colab (the "Local folder" cell overrides this).
root_folder = "/content/drive/MyDrive/Licenta_INFO_selection"

Local folder¶

In [ ]:
import os
# Project root when running locally. NOTE(review): machine-specific absolute
# path -- adjust before running on another machine (see "Instructions" above).
root_folder = "/Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_selection"

Data extraction and processing¶

Data extraction¶

Root data folder¶

In [2]:
import os
# Folder (relative to root_folder) where all raw and processed data lives.
root_data_relative_path = "Data"

# Absolute data path used by the loader/processing cells below.
root_data_folder_path = os.path.join(root_folder, root_data_relative_path)
print("Root data folder path:", root_data_folder_path)
Root data folder path: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Data

Dukascopy data¶

Install plugin¶

In [ ]:
# Dukascopy historical-data client. NOTE(review): pin a version for reproducibility.
!pip install dukascopy-python

Download and save data¶

In [ ]:
import pandas as pd
import os
from datetime import datetime
import dukascopy_python as dp
from dukascopy_python.instruments import (
    INSTRUMENT_FX_MAJORS_EUR_USD,
    INSTRUMENT_FX_METALS_XAU_USD,
    INSTRUMENT_IDX_AMERICA_E_SANDP_500
)

# Save under the absolute data folder so downloads land where the loader cell
# later reads from (dir_Dukascopy = os.path.join(root_data_folder_path, ...)).
# The original used the CWD-relative "Data" path, which only coincides when
# the notebook happens to run from root_folder.
output_dir_Dukascopy = os.path.join(root_data_folder_path, "Dukascopy")
os.makedirs(output_dir_Dukascopy, exist_ok=True)

# Instruments to download, keyed by the label used in output filenames.
dukascopy_symbols = {
    "EURUSD": INSTRUMENT_FX_MAJORS_EUR_USD,
    "GOLD": INSTRUMENT_FX_METALS_XAU_USD,
    "SP500": INSTRUMENT_IDX_AMERICA_E_SANDP_500
}

dukascopy_timeframes = {
    "5min":  dp.INTERVAL_MIN_5,
    "hourly": dp.INTERVAL_HOUR_1,
}


start_date = datetime(2007, 1, 1)
end_date = datetime(2025, 7, 10)

dukascopy_data = {}  # {label: {timeframe: DataFrame}}

for label, instrument in dukascopy_symbols.items():
    dukascopy_data[label] = {}
    print(f"\nDownloading data for {label}")

    for tf_label, interval_value in dukascopy_timeframes.items():
        print(f"  Timeframe: {tf_label}")
        try:
            df = dp.fetch(
                instrument=instrument,
                interval=interval_value,
                offer_side=dp.OFFER_SIDE_BID,
                start=start_date,
                end=end_date
            )

            if not df.empty:
                # The timestamp comes back as the index (the original
                # rename() targeted columns, so it was a no-op, and its
                # OHLCV entries all mapped names to themselves -- removed).
                df = df.reset_index()
                df["time"] = pd.to_datetime(df["timestamp"]).dt.strftime("%Y-%m-%d %H:%M")
                df.set_index("time", inplace=True)

                dukascopy_data[label][tf_label] = df

                filename = f"{label}_{tf_label}.csv"
                df.to_csv(os.path.join(output_dir_Dukascopy, filename))
            else:
                print(f"  No data for {label} at {tf_label}")
        except Exception as e:
            print(f"  Error downloading {label} at {tf_label}: {e}")

print("\nDownload and save complete.")

Alpaca data¶

Install plugin¶

In [ ]:
# Alpaca market-data SDK. NOTE(review): pin a version for reproducibility.
!pip install alpaca-py

Download and save data¶

In [ ]:
from alpaca.data.historical import StockHistoricalDataClient
from alpaca.data.requests import StockBarsRequest
from alpaca.data.timeframe import TimeFrame, TimeFrameUnit
from datetime import datetime
import pandas as pd
import os


# Save under the absolute data folder so it matches the loader cell below
# (dir_Alpaca = os.path.join(root_data_folder_path, "Alpaca")).
output_dir_Alpaca = os.path.join(root_data_folder_path, "Alpaca")
os.makedirs(output_dir_Alpaca, exist_ok=True)

# SECURITY: never hardcode API keys in a notebook. The keys previously
# committed here in plain text should be rotated. Provide them via
# environment variables instead, e.g.
#   export ALPACA_API_KEY=...  ALPACA_SECRET_KEY=...
ALPACA_API_KEY = os.environ.get("ALPACA_API_KEY", "")
ALPACA_SECRET_KEY = os.environ.get("ALPACA_SECRET_KEY", "")

alpaca_stocks_tickers = ["AAPL", "MSFT", "GE", "BAC", "C"]

alpaca_timeframes = {
    "daily": TimeFrame(1, TimeFrameUnit.Day),
    "hourly": TimeFrame(1, TimeFrameUnit.Hour),
    "5min": TimeFrame(5, TimeFrameUnit.Minute)
}

alpaca_start_date = datetime(2000, 1, 1)
alpaca_end_date = datetime(2025, 7, 10)

client = StockHistoricalDataClient(ALPACA_API_KEY, ALPACA_SECRET_KEY)

alpaca_stocks_data = {}  # {ticker: {timeframe: DataFrame}}


for ticker in alpaca_stocks_tickers:
    alpaca_stocks_data[ticker] = {}
    print(f"\n Downloading {ticker}")
    for tf_label, tf_enum in alpaca_timeframes.items():
        print(f"Timeframe: {tf_label}")
        try:
            req = StockBarsRequest(
                symbol_or_symbols=ticker,
                timeframe=tf_enum,
                start=alpaca_start_date,
                end=alpaca_end_date
            )
            bars = client.get_stock_bars(req)
            df = bars.df
            if not df.empty:

                df = df.reset_index()
                # Normalize the bar timestamp to a minute-resolution string.
                df["time"] = pd.to_datetime(df["timestamp"]).dt.strftime("%Y-%m-%d %H:%M")

                # (The original rename() here mapped every column to itself
                # and was removed as a no-op.)

                alpaca_stocks_data[ticker][tf_label] = df
                file_path = os.path.join(output_dir_Alpaca, f"{ticker}_{tf_label}.csv")
                df.to_csv(file_path, index=False)
            else:
                print(f"No data for {ticker} [{tf_label}]")
        except Exception as e:
            print(f"Error downloading {ticker} [{tf_label}]: {e}")

print("\n Download and save complete.")

Binance data¶

Install plugin¶

In [ ]:
# Binance API client. NOTE(review): pin a version for reproducibility.
!pip install python-binance

Download and save data¶

In [ ]:
from binance.client import Client
from binance.exceptions import BinanceAPIException
import pandas as pd
import os


# SECURITY: never hardcode API keys in a notebook. The keys previously
# committed here in plain text should be rotated. Provide them via
# environment variables instead, e.g.
#   export BINANCE_API_KEY=...  BINANCE_SECRET_KEY=...
BINANCE_API_KEY = os.environ.get("BINANCE_API_KEY", "")
BINANCE_SECRET_KEY = os.environ.get("BINANCE_SECRET_KEY", "")

client = Client(BINANCE_API_KEY, BINANCE_SECRET_KEY)


binance_coins = ["BTCUSDT"]

binance_timeframes = {
    "daily": "1d",
    "hourly": "1h",
    "5min": "5m"
}

# Download window (human-readable strings accepted by get_historical_klines).
start_date = "8 Aug, 2017"
end_date = "10 Jul, 2025"
# Save under the absolute data folder so it matches the loader cell below
# (dir_Binance = os.path.join(root_data_folder_path, "Binance")).
output_dir_Binance = os.path.join(root_data_folder_path, "Binance")
os.makedirs(output_dir_Binance, exist_ok=True)


binance_data = {}  # {symbol: {timeframe: DataFrame}}

for symbol in binance_coins:
    binance_data[symbol] = {}
    print(f"\n Downloading {symbol}")
    for tf_label, tf_binance in binance_timeframes.items():
        print(f" Timeframe: {tf_label}")
        try:
            klines = client.get_historical_klines(
                symbol=symbol,
                interval=tf_binance,
                start_str=start_date,
                end_str=end_date)

            if not klines:
                print(f" No data for {symbol} at {tf_label}")
                continue

            # Raw kline column layout as returned by the Binance API.
            df = pd.DataFrame(klines, columns=[
                "Open time", "Open", "High", "Low", "Close", "Volume",
                "Close time", "Quote asset volume", "Number of trades",
                "Taker buy base volume", "Taker buy quote volume", "Ignore"
            ])

            # Millisecond epoch -> minute-resolution timestamp string.
            df["time"] = pd.to_datetime(df["Open time"], unit='ms')
            df["time"] = df["time"].dt.strftime("%Y-%m-%d %H:%M")

            df.rename(columns={
                "Open": "open",
                "High": "high",
                "Low": "low",
                "Close": "close",
                "Volume": "volume"
            }, inplace=True)

            filename = f"{symbol}_{tf_label}.csv"
            df.to_csv(os.path.join(output_dir_Binance, filename), index=False)

            binance_data[symbol][tf_label] = df

        except BinanceAPIException as e:
            print(f" Error: {e}")

print("\n Download and save complete.")

Yahoo data¶

Install plugin¶

In [ ]:
# Yahoo Finance downloader. NOTE(review): pin a version for reproducibility.
!pip install yfinance

Download and save data¶

In [ ]:
import yfinance as yf
import pandas as pd
import os


# Save under the absolute data folder so downloads land in the same tree the
# rest of the notebook reads from (the original used a CWD-relative path,
# inconsistent with the loader cells that use root_data_folder_path).
output_dir_Yahoo = os.path.join(root_data_folder_path, "Yahoo")
os.makedirs(output_dir_Yahoo, exist_ok=True)


# Yahoo Finance symbols to fetch, with a human-readable name for logging.
yahoo_tickers = {
    "AAPL": "Apple",
    "MSFT": "Microsoft",
    "GC=F": "Gold",
    "^GSPC": "S&P 500",
    "EURUSD=X": "EUR-USD",
    "GE": "General Electric",
    "BAC": "Bank of America",
    "C": "Citigroup",
    "BTC-USD": "Bitcoin"
}


yahoo_data_dict = {}  # {symbol: DataFrame}


for symbol, name in yahoo_tickers.items():
    print(f"Downloading data for {name} ({symbol})...")
    df = yf.download(
        symbol,
        start="2000-01-01",
        end="2025-07-10",
        interval="1d",
        auto_adjust=True,
        progress=False)

    # yf.download can return MultiIndex columns; flatten defensively.
    if isinstance(df.columns, pd.MultiIndex):
        df.columns = df.columns.get_level_values(0)

    df = df.reset_index()

    df = df.rename(columns={
        "Date": "time",
        "Open": "open",
        "High": "high",
        "Low": "low",
        "Close": "close",
        "Volume": "volume"
    })

    df["time"] = pd.to_datetime(df["time"]).dt.strftime("%Y-%m-%d %H:%M")

    yahoo_data_dict[symbol] = df
    # "=" and "^" are awkward in filenames; strip them.
    safe_symbol = symbol.replace("=", "").replace("^", "")
    file_path = os.path.join(output_dir_Yahoo, f"{safe_symbol}_daily.csv")
    df.to_csv(file_path, index=False)

print(f"Data saved in: {output_dir_Yahoo}")

Install GARCH¶

In [ ]:
# GARCH-family models and progress bars. NOTE(review): pin versions for reproducibility.
!pip install arch
!pip install tqdm

Extract GARCH and save the processed data¶

In [ ]:
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from arch import arch_model
from tqdm import tqdm
import warnings
import os
import joblib
import pickle

# Tickers/timeframes expected among the previously saved Alpaca CSVs.
alpaca_stocks_tickers = ["AAPL", "MSFT", "GE", "BAC", "C"]
alpaca_timeframes = ["daily", "hourly", "5min"]

dir_Alpaca = os.path.join(root_data_folder_path, "Alpaca")


def load_alpaca_data_from_csv(directory, ticker_list=alpaca_stocks_tickers, timeframes=alpaca_timeframes):
    """Load Alpaca CSV exports from `directory` into {ticker: {timeframe: DataFrame}}.

    Filenames must look like ``<TICKER>_<TIMEFRAME>.csv``. Files whose ticker
    or timeframe is outside the given lists are skipped; unreadable files are
    reported and skipped.
    """
    loaded = {}
    for fname in os.listdir(directory):
        if not fname.endswith(".csv"):
            continue
        try:
            parts = fname.replace(".csv", "").split("_")
            ticker, tf = parts[0], parts[1]
            if ticker_list and ticker not in ticker_list:
                continue
            if timeframes and tf not in timeframes:
                continue
            frame = pd.read_csv(
                os.path.join(directory, fname),
                index_col=1,           # column 1 holds the bar timestamp
                parse_dates=True,
                date_format='mixed')
            loaded.setdefault(ticker, {})[tf] = frame.sort_index()
        except Exception as e:
            print(f"Error loading {fname}: {e}")
    return loaded


# Load the CSVs and sanity-check what was found per ticker.
alpaca_loaded_data = load_alpaca_data_from_csv(dir_Alpaca, alpaca_stocks_tickers, alpaca_timeframes)
print(alpaca_loaded_data["AAPL"]["daily"].head())


for ticker in alpaca_loaded_data:
    print(f"{ticker}: {list(alpaca_loaded_data[ticker].keys())}")



# Symbols/timeframes expected among the previously saved Binance CSVs.
binance_coins = ["BTCUSDT"]

binance_timeframes = ["daily", "hourly", "5min"]

dir_Binance = os.path.join(root_data_folder_path, "Binance")


def load_binance_data_from_csv(directory, coins=binance_coins, timeframes=binance_timeframes):
    """Load Binance kline CSVs from `directory` into {symbol: {timeframe: DataFrame}}.

    Filenames must look like ``<SYMBOL>_<TIMEFRAME>.csv``. The "time" string
    written at download (column 12) becomes the index; the raw millisecond
    timestamp columns are dropped. Unreadable files are reported and skipped.
    """
    loaded = {}
    for fname in os.listdir(directory):
        if not fname.endswith(".csv"):
            continue
        try:
            parts = fname.replace(".csv", "").split("_")
            symbol, tf = parts[0], parts[1]
            if coins and symbol not in coins:
                continue
            if timeframes and tf not in timeframes:
                continue
            frame = pd.read_csv(
                os.path.join(directory, fname),
                index_col=12,          # the "time" column appended at download
                parse_dates=True,
                date_format='mixed')

            frame = frame.sort_index()
            # Epoch-millisecond columns are redundant once "time" is the index.
            frame.drop(columns=["Open time", "Close time"], inplace=True)

            loaded.setdefault(symbol, {})[tf] = frame
        except Exception as e:
            print(f" Error loading {fname}: {e}")
    return loaded


# Load the Binance CSVs and peek at the daily frame.
binance_loaded_data = load_binance_data_from_csv(dir_Binance, binance_coins, binance_timeframes)
print(binance_loaded_data["BTCUSDT"]["daily"].head())



dir_Dukascopy = os.path.join(root_data_folder_path, "Dukascopy")
dukascopy_symbols = ["EURUSD", "GOLD", "SP500"]
# NOTE(review): the download cell above only fetched 5min/hourly; "daily"
# CSVs are assumed to already exist in the folder -- confirm their origin.
dukascopy_timeframes = ["5min", "hourly","daily"]

def load_dukascopy_data(directory, symbols=None, timeframes=None):
    """Load Dukascopy CSV exports from `directory` into {ticker: {timeframe: DataFrame}}.

    Filenames must look like ``<TICKER>_<TIMEFRAME>.csv``. When `symbols` or
    `timeframes` is given, files outside those sets are skipped. The first
    column becomes the (datetime) index, sorted ascending. Unreadable files
    are reported and skipped.
    """
    loaded = {}
    for fname in os.listdir(directory):
        if not fname.endswith(".csv"):
            continue
        try:
            parts = fname.replace(".csv", "").split("_")
            ticker, tf = parts[0], parts[1]
            if symbols and ticker not in symbols:
                continue
            if timeframes and tf not in timeframes:
                continue
            frame = pd.read_csv(os.path.join(directory, fname), index_col=0, parse_dates=True)
            loaded.setdefault(ticker, {})[tf] = frame.sort_index()
        except Exception as e:
            print(f"Error loading {fname}: {e}")
    return loaded

# Load the Dukascopy CSVs and peek at the EURUSD daily frame.
dukascopy_loaded_data = load_dukascopy_data(dir_Dukascopy, dukascopy_symbols, dukascopy_timeframes)
print(dukascopy_loaded_data["EURUSD"]["daily"].head())

# Merge all three sources into one {ticker: {timeframe: DataFrame}} map.
all_data = {}

all_tickers = alpaca_stocks_tickers + binance_coins + dukascopy_symbols
selected_time_frames =["daily", "5min"]

for data_source in [alpaca_loaded_data, binance_loaded_data, dukascopy_loaded_data]:
    for ticker, tf_dict in data_source.items():
        if ticker not in all_data:
            all_data[ticker] = {}
        for tf_label, df in tf_dict.items():
            all_data[ticker][tf_label] = df



# Clean each selected frame: normalize columns to lowercase OHLCV, drop
# zero-volume daily bars, compute log returns. No gap interpolation here.
cleaned_data_no_interpolation = {}

for ticker in all_tickers:
    if ticker not in all_data:
        continue

    cleaned_data_no_interpolation[ticker] = {}

    for tf in selected_time_frames:
        if tf not in all_data[ticker]:
            continue

        df = all_data[ticker][tf].copy()
        df.index = pd.to_datetime(df.index)

        # Keep only the OHLCV columns, with normalized names and sorted index.
        df.columns = [col.lower() for col in df.columns]
        df = df[["open", "close", "high", "low", "volume"]]
        df.index.name = "time"
        df = df.sort_index()

        # Drop zero-volume daily bars (presumably non-trading days -- confirm).
        if tf == "daily":
            df = df[df["volume"] > 0]

        # Close-to-close log return; the first row becomes NaN and is dropped.
        df["scaled_log_return"] = np.log(df["close"] / df["close"].shift(1))
        df = df.dropna()

        cleaned_data_no_interpolation[ticker][tf] = df

print("Cleaned data without interpolation and computed scaled log returns for all selected timeframes.")



# Report the date coverage and row count per ticker/timeframe.
# NOTE(review): this loop rebinds module-level names (`df`, `start_date`,
# `end_date`) that later cells could still see -- watch for leakage.
for ticker, tf_dict in cleaned_data_no_interpolation.items():
    print(f"\nTicker: {ticker}")
    for tf, df in tf_dict.items():
        if not isinstance(df, pd.DataFrame) or df.empty:
            print(f"  Timeframe: {tf} – EMPTY or not a DataFrame")
            continue
        df = df.sort_index()
        start_date = df.index.min()
        end_date = df.index.max()
        n_rows = len(df)
        print(f"  Timeframe: {tf}")
        print(f"    Start: {start_date}")
        print(f"    End:   {end_date}")
        print(f"    Rows:  {n_rows}")



# Compute daily realized variance from squared 5-minute returns and attach it
# to the daily frames as the target column "scaled_realized_variance".
cleaned_data_no_interpolation_with_variance = {}
missing_rv_stats = []

for ticker in all_tickers:
    if ticker not in cleaned_data_no_interpolation:
        continue
    # Both resolutions are required: 5min for RV, daily as the base frame.
    if "5min" not in cleaned_data_no_interpolation[ticker] or "daily" not in cleaned_data_no_interpolation[ticker]:
        continue

    df_5min = cleaned_data_no_interpolation[ticker]["5min"].copy()
    df_daily = cleaned_data_no_interpolation[ticker]["daily"].copy()

    #Problem specific to EURUSD
    if ticker == "EURUSD":
        # BUGFIX: the original filtered on `start_date`, a stale variable
        # left over from the preceding coverage-report loop (the last
        # ticker's df.index.min()); the intended cutoff is `select_date`.
        select_date = "2012-01-12"
        df_daily = df_daily[df_daily.index >= pd.to_datetime(select_date)]

    if "scaled_log_return" not in df_5min.columns:
        print(f"Skipping {ticker}: no scaled_log_return column in 5min data")
        continue

    df_5min.index = pd.to_datetime(df_5min.index)

    # Realized variance: sum of squared 5-minute log returns per calendar day.
    df_5min["date"] = df_5min.index.date
    daily_rv = df_5min.groupby("date")["scaled_log_return"].apply(lambda x: (x**2).sum()).rename("scaled_realized_variance")

    df_daily.index = pd.to_datetime(df_daily.index)
    df_daily["date"] = df_daily.index.date

    df_daily = df_daily.merge(daily_rv, on="date", how="left")
    df_daily = df_daily.set_index("date")
    # Drop the first daily row (presumably a warm-up/partial day -- confirm).
    df_daily = df_daily.iloc[1:]

    # Track how many daily rows lack a matching 5-minute realized variance.
    num_missing = df_daily["scaled_realized_variance"].isna().sum()
    total_rows = len(df_daily)
    missing_rv_stats.append({
        "ticker": ticker,
        "missing_rv_count": num_missing,
        "total_rows": total_rows,
        "missing_percentage": num_missing / total_rows * 100
    })

    #just 2 NAs get dropped here, one for gold, one for EURUSD
    df_daily = df_daily.dropna()

    cleaned_data_no_interpolation_with_variance[ticker] = {}
    cleaned_data_no_interpolation_with_variance[ticker]["daily"] = df_daily

print("Realized variance added to daily data.")


missing_rv_df = pd.DataFrame(missing_rv_stats)
print(missing_rv_df)



# Rolling-window GARCH estimation settings.
lookback = DateOffset(years=1)  # estimation window length
min_obs = 200                   # minimum clean observations required to fit

# (model family, p, q) specifications to fit for every window.
model_configs = [
    ("TGARCH", 1, 1)
]



data_with_garch_no_interpolation = {}

for ticker in tqdm(cleaned_data_no_interpolation_with_variance.keys(), desc="Processing tickers"):
    if "daily" not in cleaned_data_no_interpolation_with_variance[ticker]:
        continue

    df_daily = cleaned_data_no_interpolation_with_variance[ticker]["daily"].copy()
    if "scaled_log_return" not in df_daily.columns:
        continue

    df_daily.index = pd.to_datetime(df_daily.index)
    df_daily = df_daily.sort_index().dropna()
    scaled_log_returns = df_daily["scaled_log_return"]

    # Forecasts start once a full lookback window of history is available.
    start_eligible = df_daily.index[0] + lookback
    eligible_dates = df_daily.index[df_daily.index >= start_eligible]

    # Per-model accumulators, one list entry per eligible date:
    # "x_" models fit backward-only windows (features);
    # "y_" models fit windows shifted 27 days forward (targets).
    models_x = {f"x_{m}_{p}_{q}": dict(pred_var=[], bic=[], params=[], converged=[], warnings=[]) for m, p, q in model_configs}
    models_y = {f"y_{m}_{p}_{q}": dict(pred_var=[], bic=[], params=[], converged=[], warnings=[]) for m, p, q in model_configs}

    def fit_model_safe(model_func, model_type):
        # Fit one model while capturing warnings and swallowing failures.
        # Returns (1-step-ahead variance, BIC, params dict, converged flag,
        # warning text); on any exception returns NaNs/empty plus the message.
        try:
            with warnings.catch_warnings(record=True) as wlist:
                warnings.simplefilter("always")
                with np.errstate(all='ignore'):
                    model = model_func()
                    result = model.fit(disp="off", options={"maxiter": 1000})
                warning_msg = "; ".join([str(w.message) for w in wlist]) if wlist else ""
                forecast = result.forecast(horizon=1, reindex=False)
                pred_var = forecast.variance.iloc[-1, 0]
                converged = result.convergence_flag == 0
                return pred_var, result.bic, result.params.to_dict(), converged, warning_msg
        except Exception as e:
            return np.nan, np.nan, {}, False, str(e)

    for current_dt in eligible_dates:
        # Backward only window
        window_start = current_dt - lookback
        window_data = scaled_log_returns.loc[window_start:current_dt - pd.Timedelta(days=1)]
        window_data = window_data.replace([np.inf, -np.inf], np.nan).dropna()

        if len(window_data) < min_obs or not np.isfinite(window_data).all():
            # Record placeholders so every list stays aligned with eligible_dates.
            for m in models_x.values():
                m["pred_var"].append(np.nan)
                m["bic"].append(np.nan)
                m["params"].append({})
                m["converged"].append(False)
                m["warnings"].append("Invalid or insufficient window")
        else:
            for model_type, p, q in model_configs:
                model_key = f"x_{model_type}_{p}_{q}"

                def model_factory():
                    # Build the arch_model for this spec over the backward window.
                    if model_type == "TGARCH":
                        return arch_model(window_data, vol='GARCH', p=p, q=q, dist='t')
                    elif model_type == "GARCH":
                        return arch_model(window_data, vol='GARCH', p=p, q=q, dist='normal')
                    elif model_type == "ARCH":
                        return arch_model(window_data, vol='ARCH', p=p, dist='normal')
                    elif model_type == "GJRGARCH":
                        return arch_model(window_data, vol='GARCH', p=p, o=1, q=q, dist='normal')

                pred_var, bic, params, conv, warn = fit_model_safe(model_factory, model_type)
                models_x[model_key]["pred_var"].append(pred_var)
                models_x[model_key]["bic"].append(bic)
                models_x[model_key]["params"].append(params)
                models_x[model_key]["converged"].append(conv)
                models_x[model_key]["warnings"].append(warn)

        #Backward and forward window
        fwd_end = current_dt + pd.Timedelta(days=27)
        fwd_start = fwd_end - lookback
        fwd_data = scaled_log_returns.loc[fwd_start:fwd_end]
        fwd_data = fwd_data.replace([np.inf, -np.inf], np.nan).dropna()

        if len(fwd_data) < min_obs or not np.isfinite(fwd_data).all():
            # Same placeholder handling for the forward-window models.
            for m in models_y.values():
                m["pred_var"].append(np.nan)
                m["bic"].append(np.nan)
                m["params"].append({})
                m["converged"].append(False)
                m["warnings"].append("Invalid or insufficient window")
        else:
            for model_type, p, q in model_configs:
                model_key = f"y_{model_type}_{p}_{q}"

                def model_factory():
                    # Same specifications, fit over the forward-shifted window.
                    if model_type == "TGARCH":
                        return arch_model(fwd_data, vol='GARCH', p=p, q=q, dist='t')
                    elif model_type == "GARCH":
                        return arch_model(fwd_data, vol='GARCH', p=p, q=q, dist='normal')
                    elif model_type == "ARCH":
                        return arch_model(fwd_data, vol='ARCH', p=p, dist='normal')
                    elif model_type == "GJRGARCH":
                        return arch_model(fwd_data, vol='GARCH', p=p, o=1, q=q, dist='normal')

                pred_var, bic, params, conv, warn = fit_model_safe(model_factory, model_type)
                models_y[model_key]["pred_var"].append(pred_var)
                models_y[model_key]["bic"].append(bic)
                models_y[model_key]["params"].append(params)
                models_y[model_key]["converged"].append(conv)
                models_y[model_key]["warnings"].append(warn)

    #Combine all into output dataframe
    df_out = df_daily.loc[eligible_dates].copy()

    def build_model_df(models_dict):
        # Flatten the per-model accumulators into columns indexed on df_out.
        data = {}
        for model_name, output in models_dict.items():
            data[f"{model_name}_pred_variance"] = output["pred_var"]
            data[f"{model_name}_BIC"] = output["bic"]
            data[f"{model_name}_converged"] = output["converged"]
            data[f"{model_name}_warnings"] = output["warnings"]
            # Union of parameter names across all fits; missing values -> NaN.
            for param in set().union(*output["params"]):
                data[f"{model_name}_param_{param}"] = [p.get(param, np.nan) for p in output["params"]]
        return pd.DataFrame(data, index=df_out.index)


    df_model_x = build_model_df(models_x)
    df_model_y = build_model_df(models_y)

    df_out = pd.concat([df_out, df_model_x, df_model_y], axis=1)
    df_out = df_out.copy()


    # Diagnostics: share of missing forecasts per model.
    print(f"\n--- Missing forecast rate for {ticker} ---")
    for model_key, model_dict in {**models_x, **models_y}.items():
        pred_var_list = model_dict["pred_var"]
        if len(pred_var_list) > 0:
            nan_rate = np.isnan(pred_var_list).mean()
            print(f"{ticker}: {nan_rate:.2%} NaNs in {model_key} forecast")
        else:
            print(f"{ticker}: No data processed for {model_key}")

    data_with_garch_no_interpolation[ticker] = df_out



# Flatten the per-ticker GARCH output frames into the final feature CSVs.
output_dir_Processed = os.path.join(root_data_folder_path, "Processed")
# BUGFIX: create the output directory before writing -- every other save cell
# does this, and to_csv() at the bottom fails if the folder does not exist.
os.makedirs(output_dir_Processed, exist_ok=True)
processed_dict = {}

# Canonical parameter columns; parameters a given model does not have are
# filled with 0.0 so every ticker's CSV shares an identical schema.
standard_params = (
    [f"alpha_{i}" for i in range(1, 3)] +
    [f"beta_{i}" for i in range(1, 3)] +
    ["omega"] +
    ["mu"] +
    ["nu_tgarch"] +
    [f"gamma_gjr_{i}" for i in range(1, 3)]
)

# Diagnostics collected while flattening.
rows_with_large_variances = []       # at least one converged model blew up (>= 1e10)
rows_only_with_large_variances = []  # every converged model blew up
non_converged_rows = []              # no model converged at all

for ticker, df in data_with_garch_no_interpolation.items():
    rows = []

    for i, row in df.iterrows():
        # Base OHLCV / return / realized-variance fields for this date.
        new_row = {
            "date": i,
            "open": row.get("open", np.nan),
            "close": row.get("close", np.nan),
            "high": row.get("high", np.nan),
            "low": row.get("low", np.nan),
            "lg_return": row.get("scaled_log_return", np.nan),
            "volume": row.get("volume", np.nan),
            "y_var": row.get("scaled_realized_variance", np.nan)
        }

        bics_x, pred_vars_x = {}, {}
        bics_y, pred_vars_y = {}, {}

        for model_type, p, q in model_configs:
            base = f"{model_type}_{p}_{q}"
            x_key = f"x_{base}"
            y_key = f"y_{base}"

            #X models
            # Only converged models with finite forecasts enter the BIC race.
            conv_x = row.get(f"{x_key}_converged", False)
            pred_var_x = row.get(f"{x_key}_pred_variance", np.nan)
            if conv_x and np.isfinite(pred_var_x):
                bics_x[base] = row.get(f"{x_key}_BIC", np.nan)
                pred_vars_x[base] = pred_var_x

            new_row[f"{x_key.lower()}_var"] = pred_var_x if conv_x and np.isfinite(pred_var_x) else np.nan

            #Y models
            conv_y = row.get(f"{y_key}_converged", False)
            pred_var_y = row.get(f"{y_key}_pred_variance", np.nan)
            if conv_y and np.isfinite(pred_var_y):
                bics_y[base] = row.get(f"{y_key}_BIC", np.nan)
                pred_vars_y[base] = pred_var_y

            new_row[f"{y_key.lower()}_var"] = pred_var_y if conv_y and np.isfinite(pred_var_y) else np.nan


        for model_label, pred_vars in [("x", pred_vars_x), ("y", pred_vars_y)]:
            # Average the sane (< 1e10) forecasts as a fallback value; 1e10
            # acts as a "blown up" sentinel throughout.
            converged_vars = list(pred_vars.values())
            filtered_vars = [v for v in converged_vars if np.isfinite(v) and v < 1e10]
            avg_var = np.mean(filtered_vars) if filtered_vars else 1e10

            if any(v >= 1e10 for v in converged_vars):
                rows_with_large_variances.append({"ticker": ticker, "date": i, "model_type": model_label})

            if all(v >= 1e10 for v in converged_vars) and converged_vars:
                rows_only_with_large_variances.append({"ticker": ticker, "date": i, "model_type": model_label})

            if not pred_vars:
                non_converged_rows.append({"ticker": ticker, "date": i, "model_type": model_label})

            #Best model selection
            # Lowest BIC wins; fall back to the average variance when the
            # winner has no stored forecast.
            bics = bics_x if model_label == "x" else bics_y
            best_model = min(bics, key=bics.get) if bics else None
            best_pred_var = pred_vars.get(best_model, avg_var)

            new_row[f"{model_label}_best_model"] = best_model
            new_row[f"{model_label}_best_pred_var"] = best_pred_var

            # Initialize every canonical parameter to 0.0, then overwrite
            # with the best model's estimates below.
            for param in standard_params:
                new_row[f"{model_label}_best_{param}"] = 0.0

            if best_model:
                param_prefix = f"{model_label}_{best_model}_param_"
                for col in df.columns:
                    if col.startswith(param_prefix):
                        raw_param = col.split(param_prefix)[1]
                        value = row[col]
                        mapped = None

                        # Map arch's "alpha[1]"-style names onto the
                        # canonical "alpha_1"-style columns.
                        if raw_param.startswith("alpha["):
                            idx = int(raw_param[6:-1])
                            mapped = f"alpha_{idx}"

                        elif raw_param.startswith("beta["):
                            idx = int(raw_param[5:-1])
                            mapped = f"beta_{idx}"

                        elif raw_param == "omega":
                            mapped = "omega"

                        elif raw_param == "mu":
                            mapped = "mu"

                        elif raw_param.startswith("gamma["):
                            idx = int(raw_param[6:-1])
                            if "GJRGARCH" in best_model.upper():
                                mapped = f"gamma_gjr_{idx}"

                        elif raw_param == "nu":
                            mapped = "nu_tgarch"

                        if mapped and mapped in standard_params:
                            new_row[f"{model_label}_best_{mapped}"] = value

        rows.append(new_row)

    processed_dict[ticker] = pd.DataFrame(rows)
    processed_dict[ticker].to_csv(
        os.path.join(output_dir_Processed, f"processed_{ticker}.csv"),
        index=False)



# Summarize the non-converged rows and check the schema of one ticker.
non_converged_df = pd.DataFrame(non_converged_rows)
processed_dict["AAPL"].info()

# Location for the pickled session snapshot used to resume without recomputing.
output_dir_Environment = os.path.join(root_folder, "Saved_objects")
os.makedirs(output_dir_Environment, exist_ok=True)
session_data_filename = os.path.join(output_dir_Environment, 'process_data_no_interpolation_session.pkl')

def is_picklable(obj):
    """Return True when `obj` survives pickle.dumps() without raising."""
    try:
        pickle.dumps(obj)
    except Exception:
        return False
    return True

# Snapshot every picklable, non-dunder global into one dict.
session_vars = {
    k: v for k, v in globals().items()
    if not k.startswith("__") and is_picklable(v)
}

#save session data
joblib.dump(session_vars, session_data_filename)
print(f"Session saved with {len(session_vars)} variables to:\n{session_data_filename}")



#load the saved session
# NOTE(review): globals().update() overwrites any live variable that shares a
# name with a saved one -- intended for resuming from a fresh kernel only.
session_data_filename = os.path.join(root_folder,"Saved_objects/process_data_no_interpolation_session.pkl")
session = joblib.load(session_data_filename)
globals().update(session)

Data processing¶

Processed data folder path¶

In [69]:
# Location (relative to root_folder) of the per-ticker processed CSVs
# written by the GARCH-extraction step above.
data_relative_path = "Data/Processed"

data_folder_path = os.path.join(root_folder, data_relative_path)
print("Data folder path:", data_folder_path)
Data folder path: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Data/Processed

Importing data and renaming columns¶

In [70]:
import pandas as pd

tickers = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]

# Load each ticker's processed CSV produced by the extraction step.
df_dict = {
    ticker: pd.read_csv(f"{data_folder_path}/processed_{ticker}.csv", parse_dates=["date"])
    for ticker in tickers
}

print("Data size:")
for ticker in tickers:
    print(f"{ticker}: {df_dict[ticker].shape}")

    df = df_dict[ticker]
    # NOTE(review): the writer emits "lg_return" directly, so the
    # "y_lg_return" mapping looks like a no-op kept for older CSVs --
    # confirm before removing. "y_var" -> "rv" is the live rename.
    df.rename(columns={
      "y_lg_return": "lg_return",
      "y_var": "rv"
    }, inplace=True)

    # NOTE(review): current CSVs use the "x_best_"/"y_best_" prefix, so
    # these "best_x_"/"best_y_" matches never fire -- apparently leftovers
    # from an earlier column-naming scheme.
    new_columns = {}
    for col in df.columns:
        if col.startswith("best_x_"):
            new_columns[col] = col.replace("best_x_", "x_best_param_")
    for col in df.columns:
        if col.startswith("best_y_"):
          new_columns[col] = col.replace("best_y_", "y_best_param_")

    df.rename(columns=new_columns, inplace=True)

print("\nData types:")
df_dict["AAPL"].info()
Data size:
AAPL: (2137, 32)
MSFT: (2137, 32)
GE: (2137, 32)
BAC: (2137, 32)
C: (2137, 32)
BTCUSDT: (2518, 32)
EURUSD: (3901, 32)
GOLD: (5653, 32)
SP500: (3585, 32)

Data types:
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2137 entries, 0 to 2136
Data columns (total 32 columns):
 #   Column              Non-Null Count  Dtype         
---  ------              --------------  -----         
 0   date                2137 non-null   datetime64[ns]
 1   open                2137 non-null   float64       
 2   close               2137 non-null   float64       
 3   high                2137 non-null   float64       
 4   low                 2137 non-null   float64       
 5   lg_return           2137 non-null   float64       
 6   volume              2137 non-null   float64       
 7   rv                  2137 non-null   float64       
 8   x_tgarch_1_1_var    2137 non-null   float64       
 9   y_tgarch_1_1_var    2137 non-null   float64       
 10  x_best_model        2137 non-null   object        
 11  x_best_pred_var     2137 non-null   float64       
 12  x_best_alpha_1      2137 non-null   float64       
 13  x_best_alpha_2      2137 non-null   float64       
 14  x_best_beta_1       2137 non-null   float64       
 15  x_best_beta_2       2137 non-null   float64       
 16  x_best_omega        2137 non-null   float64       
 17  x_best_mu           2137 non-null   float64       
 18  x_best_nu_tgarch    2137 non-null   float64       
 19  x_best_gamma_gjr_1  2137 non-null   float64       
 20  x_best_gamma_gjr_2  2137 non-null   float64       
 21  y_best_model        2137 non-null   object        
 22  y_best_pred_var     2137 non-null   float64       
 23  y_best_alpha_1      2137 non-null   float64       
 24  y_best_alpha_2      2137 non-null   float64       
 25  y_best_beta_1       2137 non-null   float64       
 26  y_best_beta_2       2137 non-null   float64       
 27  y_best_omega        2137 non-null   float64       
 28  y_best_mu           2137 non-null   float64       
 29  y_best_nu_tgarch    2137 non-null   float64       
 30  y_best_gamma_gjr_1  2137 non-null   float64       
 31  y_best_gamma_gjr_2  2137 non-null   float64       
dtypes: datetime64[ns](1), float64(29), object(2)
memory usage: 534.4+ KB

Dropping the "best model" columns which contain strings¶

In [71]:
# Remove the string-valued "best model" label columns; only the numeric
# features are kept for modeling.
for ticker in tickers:
    df_dict[ticker] = df_dict[ticker].drop(columns=["x_best_model", "y_best_model"])
    print("Dropped columns for: ", ticker)
Dropped columns for:  AAPL
Dropped columns for:  MSFT
Dropped columns for:  GE
Dropped columns for:  BAC
Dropped columns for:  C
Dropped columns for:  BTCUSDT
Dropped columns for:  EURUSD
Dropped columns for:  GOLD
Dropped columns for:  SP500

Dropping some overlapping columns ONLY when working with a single GARCH¶

In [72]:
# CLEANUP: the original cell opened with a bare `df_dict["EURUSD"].columns`
# expression whose value was discarded (not the cell's last statement, so it
# never displayed) -- removed as dead code.
# Drop the single-spec variance columns: with only one GARCH configuration
# they duplicate x/y_best_pred_var.
for ticker in tickers:
    df_dict[ticker] = df_dict[ticker].drop(["x_tgarch_1_1_var", "y_tgarch_1_1_var"], axis=1)
    print("Dropped columns for: ", ticker)
Dropped columns for:  AAPL
Dropped columns for:  MSFT
Dropped columns for:  GE
Dropped columns for:  BAC
Dropped columns for:  C
Dropped columns for:  BTCUSDT
Dropped columns for:  EURUSD
Dropped columns for:  GOLD
Dropped columns for:  SP500

Transforming predicted variance columns to log scale¶

In [73]:
import numpy as np
import pandas as pd

# Variance columns to move onto the log scale (alternatives kept for experiments):
cols = ["x_best_pred_var"]
#cols = ["x_best_pred_var", "y_best_pred_var"]
#cols = ["x_best_pred_var", "y_best_pred_var", "rv"]

for ticker in tickers:
    frame = df_dict[ticker]
    for col_name in cols:
        if col_name not in frame:
            continue
        # Floor at 1e-8 so the log never sees zero or negative variances.
        floored = frame[col_name].astype(float).clip(lower=1e-8)
        frame.loc[:, col_name] = np.log(floored)

Helper function to remove constant columns¶

They might emerge for some GARCH parameter.

In [74]:
import pandas as pd

def remove_constant_columns_from_df(df, ticker=None):
    """Return ``df`` without columns that hold a single unique value.

    NaN counts as a regular value (``dropna=False``), so an all-NaN column is
    constant too. The names of removed columns are printed under the given
    ticker label (or 'DataFrame' when no ticker is supplied).
    """
    unique_counts = df.nunique(dropna=False)
    constant_cols = unique_counts[unique_counts == 1].index.tolist()

    if constant_cols:
        print(f"\n{ticker or 'DataFrame'} — Constant columns removed:")
        for col_name in constant_cols:
            print(f"  - {col_name}")

    return df.drop(columns=constant_cols)

# Strip constant columns from every ticker's frame in place.
for symbol in list(df_dict):
    df_dict[symbol] = remove_constant_columns_from_df(df_dict[symbol], ticker=symbol)
AAPL — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

MSFT — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

GE — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

BAC — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

C — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

BTCUSDT — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

EURUSD — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

GOLD — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

SP500 — Constant columns removed:
  - x_best_alpha_2
  - x_best_beta_2
  - x_best_gamma_gjr_1
  - x_best_gamma_gjr_2
  - y_best_alpha_2
  - y_best_beta_2
  - y_best_gamma_gjr_1
  - y_best_gamma_gjr_2

Helper function to check for very large values¶

We had some problems with EGARCH variances and parameters, so we removed them from the data pipeline.

In [75]:
import pandas as pd

def find_large_values(df_dict, threshold=1e6):
    """Report numeric columns whose magnitude exceeds ``threshold``.

    Scans every numeric column of every DataFrame in ``df_dict`` and prints,
    per ticker, whether the column's min/max stay within the threshold.

    BUGFIX: a column containing only NaN produces max = min = NaN, and
    ``abs(NaN) > threshold`` is False, so such degenerate columns used to be
    misreported as "OK!". They are now flagged explicitly.
    Non-numeric columns are still skipped silently, as before.
    """
    for ticker, df in df_dict.items():
        print(f"\nChecking ticker: {ticker}")
        for col in df.columns:
            if pd.api.types.is_numeric_dtype(df[col]):
                max_val = df[col].max(skipna=True)
                min_val = df[col].min(skipna=True)
                if pd.isna(max_val) or pd.isna(min_val):
                    # max/min are NaN only when the column has no valid values
                    print(f"Column '{col}' contains only NaN values!")
                elif abs(max_val) > threshold or abs(min_val) > threshold:
                    print(f"Column '{col}' has large values:")
                    print(f"Max: {max_val}")
                    print(f"Min: {min_val}")
                else:
                    print(f"Column '{col}' OK!")

# Scan all ticker frames; any numeric column with magnitude above 1e10 is reported.
find_large_values(df_dict, threshold=1e10)
Checking ticker: AAPL
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: MSFT
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: GE
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: BAC
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: C
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: BTCUSDT
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: EURUSD
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: GOLD
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Checking ticker: SP500
Column 'open' OK!
Column 'close' OK!
Column 'high' OK!
Column 'low' OK!
Column 'lg_return' OK!
Column 'volume' OK!
Column 'rv' OK!
Column 'x_best_pred_var' OK!
Column 'x_best_alpha_1' OK!
Column 'x_best_beta_1' OK!
Column 'x_best_omega' OK!
Column 'x_best_mu' OK!
Column 'x_best_nu_tgarch' OK!
Column 'y_best_pred_var' OK!
Column 'y_best_alpha_1' OK!
Column 'y_best_beta_1' OK!
Column 'y_best_omega' OK!
Column 'y_best_mu' OK!
Column 'y_best_nu_tgarch' OK!

Saving the unstructured data for GARCH¶

In [76]:
import os
import pickle

# All pickled artifacts live under <root_folder>/Saved_objects.
objects_relative_path = "Saved_objects"

save_data_object_path = os.path.join(root_folder, objects_relative_path)
print("Object folder path:", save_data_object_path)
os.makedirs(save_data_object_path, exist_ok=True)

unstructured_dictionary_file_path = os.path.join(save_data_object_path, "unstructured_data_dict.pkl")

# Persist the per-ticker frames before windowing, for later reuse.
with open(unstructured_dictionary_file_path, "wb") as handle:
    pickle.dump(df_dict, handle)

print(f"Unstructured data dictionary saved to: {unstructured_dictionary_file_path}")
Object folder path: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects
Unstructured data dictionary saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/unstructured_data_dict.pkl

Put data in the right format¶

We have to separate the X features into time and others (price, variance, GARCH parameters) as these will need to be treated differently when it comes to normalization.

In [77]:
from typing import Union, Dict, Optional
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def df_to_raw_X_y_dictionary(data,
                             ticker="Unnamed ticker",
                             window_size=60,
                             horizon=60,
                             rv_y=True,
                             lg_return_y=False,
                             garch_var_x=True,
                             best_param_x=True,
                             best_param_y=True,
                             volume_x=True,
                             x_lg_return=False,
                             best_var_x=False,
                             best_var_y=False
                             ):
    """Turn one ticker's DataFrame into sliding-window model inputs/targets.

    For every valid anchor index i the function emits:
      * ``X_other``: the previous ``window_size`` rows of the price/variance/
        GARCH-parameter features (selected via the boolean flags),
      * ``X_time``: the previous ``window_size`` rows of calendar features
        derived from the 'date' column (cyclical sin/cos encodings plus
        min-max-scaled trend counters),
      * ``y``: the next ``horizon`` rows of the selected target columns.

    Parameters
    ----------
    data : pd.DataFrame
        Must hold a datetime 'date' column, an 'rv' column, and whatever
        columns the enabled flags reference ('lg_return', 'volume',
        'x_*_var', 'x_best_*', 'y_best_*').
    ticker : str
        Only used in log / error messages.
    window_size, horizon : int
        Length of the look-back window and of the forecast horizon.
    rv_y, lg_return_y : bool
        Target selection; at least one must be True.
    garch_var_x, best_param_x, best_var_x, volume_x, x_lg_return : bool
        Feature selection for ``X_other``.
    best_param_y, best_var_y : bool
        Extra 'y_best_*' target selection.

    Returns
    -------
    dict | None
        Keys 'X_other', 'X_time', 'y' (float64 arrays with shape
        (n_samples, window_size|horizon, n_features)) plus the flattened
        column-name lists, or None when the frame is shorter than
        window_size + horizon.
    """
    assert rv_y or lg_return_y, "At least one of rv_y or lg_return_y must be True"

    df_copy = data.copy()

    # Need one full look-back window plus one full horizon.
    if len(df_copy) < window_size + horizon:
        print(f"Skipping {ticker}: Not enough data.")
        return None

    if "date" not in df_copy.columns or not pd.api.types.is_datetime64_any_dtype(df_copy["date"]):
        raise ValueError(f"Data for {ticker} must include a 'date' column in datetime format.")

    # Raw calendar components.
    df_copy["x_time_day_of_week"] = df_copy["date"].dt.dayofweek
    df_copy["x_time_day_of_month"] = df_copy["date"].dt.day
    df_copy["x_time_day_of_year"] = df_copy["date"].dt.dayofyear
    df_copy["x_time_week_of_year"] = df_copy["date"].dt.isocalendar().week.astype(int)
    df_copy["x_time_month_of_year"] = df_copy["date"].dt.month
    df_copy["x_time_is_quarter_end"] = df_copy["date"].dt.is_quarter_end.astype(int)
    df_copy["x_time_years_since_2000"] = df_copy["date"].dt.year - 2000
    df_copy["x_time_days_since_2000"] = (df_copy["date"] - pd.Timestamp("2000-01-01")).dt.days
    # BUGFIX: months elapsed since Jan 2000 is years*12 + (month-1). The old
    # expression (years * month_of_year - 1) was neither a month count nor
    # monotone in time, so the trend feature was meaningless.
    df_copy["x_time_months_since_2000"] = (df_copy["x_time_years_since_2000"] * 12
                                           + df_copy["x_time_month_of_year"] - 1)

    # Sin/cos cyclical re-codings for the periodic time features.
    df_copy["x_time_dow_sin"] = np.sin(2 * np.pi * df_copy["x_time_day_of_week"] / 7)
    df_copy["x_time_dow_cos"] = np.cos(2 * np.pi * df_copy["x_time_day_of_week"] / 7)
    df_copy["x_time_dom_sin"] = np.sin(2 * np.pi * df_copy["x_time_day_of_month"] / 31)
    df_copy["x_time_dom_cos"] = np.cos(2 * np.pi * df_copy["x_time_day_of_month"] / 31)
    df_copy["x_time_doy_sin"] = np.sin(2 * np.pi * df_copy["x_time_day_of_year"] / 365)
    df_copy["x_time_doy_cos"] = np.cos(2 * np.pi * df_copy["x_time_day_of_year"] / 365)
    df_copy["x_time_woy_sin"] = np.sin(2 * np.pi * df_copy["x_time_week_of_year"] / 52)
    df_copy["x_time_woy_cos"] = np.cos(2 * np.pi * df_copy["x_time_week_of_year"] / 52)
    df_copy["x_time_moy_sin"] = np.sin(2 * np.pi * df_copy["x_time_month_of_year"] / 12)
    df_copy["x_time_moy_cos"] = np.cos(2 * np.pi * df_copy["x_time_month_of_year"] / 12)

    # Min-max scale the trend counters to [0, 1]. Done inline with pandas
    # (equivalent to sklearn's MinMaxScaler, including mapping a zero-range
    # column to 0) so this function carries no sklearn dependency.
    trend_features = ["x_time_years_since_2000", "x_time_days_since_2000", "x_time_months_since_2000"]
    trend_block = df_copy[trend_features].astype(float)
    trend_min = trend_block.min()
    trend_range = trend_block.max() - trend_min
    trend_range = trend_range.mask(trend_range == 0, 1.0)
    df_copy[trend_features] = (trend_block - trend_min) / trend_range

    X_time_features = [
        "x_time_dow_sin", "x_time_dow_cos", "x_time_dom_sin", "x_time_dom_cos",
        "x_time_doy_sin", "x_time_doy_cos", "x_time_woy_sin", "x_time_woy_cos",
        "x_time_moy_sin", "x_time_moy_cos", "x_time_is_quarter_end",
        "x_time_years_since_2000", "x_time_days_since_2000", "x_time_months_since_2000"
    ]

    # Non-time input features, selected by the flags.
    if x_lg_return:
        X_other_features = ["rv", "lg_return"]
    else:
        X_other_features = ["rv"]
    if volume_x:
        X_other_features.append("volume")

    if garch_var_x:
        # Per-model variance columns (e.g. x_tgarch_1_1_var), excluding best-model ones.
        X_other_features += [col for col in df_copy.columns
                             if col.startswith("x_") and not col.startswith("x_best_") and col.endswith("_var")]

    if best_param_x:
        X_other_features += [col for col in df_copy.columns
                             if col.startswith("x_best_") and not col.endswith("_var")]

    if best_var_x:
        X_other_features += [col for col in df_copy.columns
                             if col.startswith("x_best_") and col.endswith("_var")]

    # Target columns, selected by the flags.
    y_targets = []
    if rv_y:
        y_targets.append("rv")
    if lg_return_y:
        y_targets.append("lg_return")

    if best_var_y:
        y_targets += [col for col in df_copy.columns
                      if col.startswith("y_best_") and col.endswith("_var")]

    if best_param_y:
        y_targets += [col for col in df_copy.columns
                      if col.startswith("y_best_") and not col.endswith("_var")]

    # Rows missing any required value would poison whole windows; drop them first.
    required_columns = X_time_features + X_other_features + y_targets
    df_copy = df_copy.dropna(subset=required_columns).reset_index(drop=True)

    X_other_all, X_time_all, y_all = [], [], []

    for i in range(window_size, len(df_copy) - horizon + 1):
        # Inputs cover rows [i - window_size, i - 1].
        other_window = df_copy.loc[i - window_size:i - 1, X_other_features].values
        time_window = df_copy.loc[i - window_size:i - 1, X_time_features].values

        # Targets cover rows [i, i + horizon - 1].
        y_horizon = df_copy.loc[i:i + horizon - 1, y_targets].values

        X_other_all.append(other_window)
        X_time_all.append(time_window)
        y_all.append(y_horizon)

    X_other_all = np.asarray(X_other_all, dtype=np.float64)
    X_time_all = np.asarray(X_time_all, dtype=np.float64)
    y_all = np.asarray(y_all, dtype=np.float64)

    # Flattened column names: feature f at lag t-k (inputs) / lead t+k (targets).
    X_other_columns = [f"{f}_t-{window_size - t - 1}" for t in range(window_size) for f in X_other_features]
    X_time_columns = [f"{f}_t-{window_size - t - 1}" for t in range(window_size) for f in X_time_features]
    y_columns = [f"{f}_t+{t+1}" for t in range(horizon) for f in y_targets]

    return {
        "X_other": X_other_all,
        "X_time": X_time_all,
        "y": y_all,
        "X_other_columns": X_other_columns,
        "X_time_columns": X_time_columns,
        "y_columns": y_columns
    }

Data format v1¶

In [78]:
structured_data_dict = {}

# Variant 1: rv + lg_return targets, best-model params in/out, with volume.
for symbol, frame in df_dict.items():
    structured = df_to_raw_X_y_dictionary(
        frame,
        ticker=symbol,
        rv_y=True,
        lg_return_y=True,
        garch_var_x=False,
        best_param_x=True,
        best_param_y=True,
        volume_x=True,
    )
    if structured is not None:
        structured_data_dict[symbol] = structured

print("Structured data dictionary created")
Structured data dictionary created

Saving data to objects to be used later¶

In [79]:
import os
import pickle

objects_relative_path = "Saved_objects"

save_data_object_path = os.path.join(root_folder, objects_relative_path)
print("Object folder path:", save_data_object_path)
os.makedirs(save_data_object_path, exist_ok=True)

data_dictionary_file_path = os.path.join(save_data_object_path, "structured_data_dict.pkl")

with open(data_dictionary_file_path, "wb") as f:
    pickle.dump(structured_data_dict, f)

print(f"Data dictionary saved to: {data_dictionary_file_path}")
Object folder path: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects
Data dictionary saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict.pkl

Extracting and saving some additional data compatible with Benchmark models¶

In [80]:
structured_data_dict_2 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=True,rv_y = True, garch_var_x= False, best_param_x=True, best_param_y=True, volume_x=False, )
    if ticker_data is not None:
        structured_data_dict_2[ticker] = ticker_data

print("Structured data dictionary 2 created")


data_dictionary_2_file_path = os.path.join(save_data_object_path, "structured_data_dict_2.pkl")

with open(data_dictionary_2_file_path, "wb") as f:
    pickle.dump(structured_data_dict_2, f)

print(f"Data dictionary 2 saved to: {data_dictionary_2_file_path}")
Structured data dictionary 2 created
Data dictionary 2 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_2.pkl

Data v3¶

In [81]:
structured_data_dict_3 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=True,rv_y = True, garch_var_x= False, best_param_x=False, best_param_y=True, volume_x=False, )
    if ticker_data is not None:
        structured_data_dict_3[ticker] = ticker_data

print("Structured data dictionary 3 created")


data_dictionary_3_file_path = os.path.join(save_data_object_path, "structured_data_dict_3.pkl")

with open(data_dictionary_3_file_path, "wb") as f:
    pickle.dump(structured_data_dict_3, f)

print(f"Data dictionary 3 saved to: {data_dictionary_3_file_path}")
Structured data dictionary 3 created
Data dictionary 3 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_3.pkl

Data v4¶

In [82]:
structured_data_dict_4 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=True,rv_y = True, garch_var_x= False, best_param_x=False, best_param_y=False, volume_x=False, )
    if ticker_data is not None:
        structured_data_dict_4[ticker] = ticker_data

print("Structured data dictionary 4 created")


data_dictionary_4_file_path = os.path.join(save_data_object_path, "structured_data_dict_4.pkl")

with open(data_dictionary_4_file_path, "wb") as f:
    pickle.dump(structured_data_dict_4, f)

print(f"Data dictionary 4 saved to: {data_dictionary_4_file_path}")
Structured data dictionary 4 created
Data dictionary 4 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_4.pkl

Data v5¶

In [83]:
structured_data_dict_5 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=False, rv_y=True, garch_var_x=False, best_param_x=False, best_param_y=False, volume_x=False, x_lg_return=False)
    if ticker_data is not None:
        structured_data_dict_5[ticker] = ticker_data

print("Structured data dictionary 5 created")


data_dictionary_5_file_path = os.path.join(save_data_object_path, "structured_data_dict_5.pkl")

with open(data_dictionary_5_file_path, "wb") as f:
    pickle.dump(structured_data_dict_5, f)

print(f"Data dictionary 5 saved to: {data_dictionary_5_file_path}")
Structured data dictionary 5 created
Data dictionary 5 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_5.pkl

Data v6¶

In [84]:
structured_data_dict_6 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=False, rv_y=True, garch_var_x=True, best_param_x=True, best_param_y=True, volume_x=False, x_lg_return=False)
    if ticker_data is not None:
        structured_data_dict_6[ticker] = ticker_data

print("Structured data dictionary 6 created")


data_dictionary_6_file_path = os.path.join(save_data_object_path, "structured_data_dict_6.pkl")

with open(data_dictionary_6_file_path, "wb") as f:
    pickle.dump(structured_data_dict_6, f)

print(f"Data dictionary 6 saved to: {data_dictionary_6_file_path}")
Structured data dictionary 6 created
Data dictionary 6 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_6.pkl

Data v7¶

In [85]:
structured_data_dict_7 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=False, rv_y=True, garch_var_x=False, best_param_x=True, best_param_y=True, volume_x=False, x_lg_return=False)
    if ticker_data is not None:
        structured_data_dict_7[ticker] = ticker_data

print("Structured data dictionary 7 created")


data_dictionary_7_file_path = os.path.join(save_data_object_path, "structured_data_dict_7.pkl")

with open(data_dictionary_7_file_path, "wb") as f:
    pickle.dump(structured_data_dict_7, f)

print(f"Data dictionary 7 saved to: {data_dictionary_7_file_path}")
Structured data dictionary 7 created
Data dictionary 7 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_7.pkl

Data v8¶

In [86]:
structured_data_dict_8 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=False, rv_y=True, garch_var_x=False, best_param_x=False, best_param_y=True, volume_x=False, x_lg_return=False)
    if ticker_data is not None:
        structured_data_dict_8[ticker] = ticker_data

print("Structured data dictionary 8 created")


data_dictionary_8_file_path = os.path.join(save_data_object_path, "structured_data_dict_8.pkl")

with open(data_dictionary_8_file_path, "wb") as f:
    pickle.dump(structured_data_dict_8, f)

print(f"Data dictionary 8 saved to: {data_dictionary_8_file_path}")
Structured data dictionary 8 created
Data dictionary 8 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_8.pkl

Data v9¶

In [87]:
structured_data_dict_9 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=False, rv_y=True, garch_var_x=False, best_param_x=False, best_param_y=False, volume_x=False, x_lg_return=False, best_var_x=True, best_var_y=True)
    if ticker_data is not None:
        structured_data_dict_9[ticker] = ticker_data

print("Structured data dictionary 9 created")


data_dictionary_9_file_path = os.path.join(save_data_object_path, "structured_data_dict_9.pkl")

with open(data_dictionary_9_file_path, "wb") as f:
    pickle.dump(structured_data_dict_9, f)

print(f"Data dictionary 9 saved to: {data_dictionary_9_file_path}")
Structured data dictionary 9 created
Data dictionary 9 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_9.pkl

Data v10¶

In [142]:
structured_data_dict_10 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=True, rv_y=True, garch_var_x=False, best_param_x=False, best_param_y=False, volume_x=False, x_lg_return=True, best_var_x=False, best_var_y=False)
    if ticker_data is not None:
        structured_data_dict_10[ticker] = ticker_data

print("Structured data dictionary 10 created")


data_dictionary_10_file_path = os.path.join(save_data_object_path, "structured_data_dict_10.pkl")

with open(data_dictionary_10_file_path, "wb") as f:
    pickle.dump(structured_data_dict_10, f)

print(f"Data dictionary 10 saved to: {data_dictionary_10_file_path}")
Structured data dictionary 10 created
Data dictionary 10 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_10.pkl

Data v11¶

In [89]:
structured_data_dict_11 = {}

for ticker, df in df_dict.items():
    ticker_data = df_to_raw_X_y_dictionary(df, ticker=ticker, lg_return_y=False, rv_y=True, garch_var_x=False, best_param_x=True, best_param_y=False, volume_x=False, x_lg_return=False, best_var_x=True, best_var_y=False)
    if ticker_data is not None:
        structured_data_dict_11[ticker] = ticker_data

print("Structured data dictionary 11 created")


data_dictionary_11_file_path = os.path.join(save_data_object_path, "structured_data_dict_11.pkl")

with open(data_dictionary_11_file_path, "wb") as f:
    pickle.dump(structured_data_dict_11, f)

print(f"Data dictionary 11 saved to: {data_dictionary_11_file_path}")
Structured data dictionary 11 created
Data dictionary 11 saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/structured_data_dict_11.pkl

Helper function to check data integrity¶

Needed because it is called when training and testing models.

In [90]:
import numpy as np

def check_data_issues(X, label="Array"):
    """Print shape, NaN/inf presence, and the value range of a numeric array.

    Intended as a quick sanity report before training; raises nothing and
    returns None. `nanmin`/`nanmax` skip NaNs so the range reflects the
    finite data (a NaN-only array would still warn via numpy).
    """
    print(f"Checking {label}:")
    print("Shape:", X.shape)

    nan_present = np.isnan(X).any()
    pos_inf_present = np.isposinf(X).any()
    neg_inf_present = np.isneginf(X).any()
    any_inf_present = np.isinf(X).any()
    lowest = np.nanmin(X)
    highest = np.nanmax(X)

    print(f"  Contains NaN values:      {nan_present}")
    print(f"  Contains +inf values:     {pos_inf_present}")
    print(f"  Contains -inf values:     {neg_inf_present}")
    print(f"  Contains any infinities:  {any_inf_present}")
    print("  Max value: ", highest)
    print("  Min value: ", lowest)

Checking some examples of the data to see that it looks good¶

In [91]:
aapl_X_price = structured_data_dict["AAPL"]["X_other"]
aapl_X_price_columns = structured_data_dict["AAPL"]["X_other_columns"]
aapl_X_time = structured_data_dict["AAPL"]["X_time"]
aapl_X_time_columns = structured_data_dict["AAPL"]["X_time_columns"]
aapl_y = structured_data_dict["AAPL"]["y"]
aapl_y_columns = structured_data_dict["AAPL"]["y_columns"]

# Column listings, one name per line.
print("\n --- AAPL X_price columns: ---")
print("\n".join(aapl_X_price_columns))

print("\n --- AAPL X_time columns: ---")
print("\n".join(aapl_X_time_columns))

print("\n ---AAPL y columns:---")
print("\n".join(aapl_y_columns))

# NaN/inf/range sanity report on each array.
check_data_issues(aapl_X_price, label="aapl_price")
check_data_issues(aapl_X_time, label="aapl_time")
check_data_issues(aapl_y, label="aapl_y")

# Raw shapes and contents for eyeballing.
print("\n shape(X) for AAPL:")
print(aapl_X_price.shape)
print("\n X for AAPL:")
print(aapl_X_price)
print("\n shape(Time) for AAPL:")
print(aapl_X_time.shape)
print("\n Time for AAPL:")
print(aapl_X_time)
print("\n shape(Y) for AAPL:")
print(aapl_y.shape)
print("\n Y for AAPL:")
print(aapl_y)
 --- AAPL X_price columns: ---
rv_t-59
volume_t-59
x_best_alpha_1_t-59
x_best_beta_1_t-59
x_best_omega_t-59
x_best_mu_t-59
x_best_nu_tgarch_t-59
rv_t-58
volume_t-58
x_best_alpha_1_t-58
x_best_beta_1_t-58
x_best_omega_t-58
x_best_mu_t-58
x_best_nu_tgarch_t-58
rv_t-57
volume_t-57
x_best_alpha_1_t-57
x_best_beta_1_t-57
x_best_omega_t-57
x_best_mu_t-57
x_best_nu_tgarch_t-57
rv_t-56
volume_t-56
x_best_alpha_1_t-56
x_best_beta_1_t-56
x_best_omega_t-56
x_best_mu_t-56
x_best_nu_tgarch_t-56
rv_t-55
volume_t-55
x_best_alpha_1_t-55
x_best_beta_1_t-55
x_best_omega_t-55
x_best_mu_t-55
x_best_nu_tgarch_t-55
rv_t-54
volume_t-54
x_best_alpha_1_t-54
x_best_beta_1_t-54
x_best_omega_t-54
x_best_mu_t-54
x_best_nu_tgarch_t-54
rv_t-53
volume_t-53
x_best_alpha_1_t-53
x_best_beta_1_t-53
x_best_omega_t-53
x_best_mu_t-53
x_best_nu_tgarch_t-53
rv_t-52
volume_t-52
x_best_alpha_1_t-52
x_best_beta_1_t-52
x_best_omega_t-52
x_best_mu_t-52
x_best_nu_tgarch_t-52
rv_t-51
volume_t-51
x_best_alpha_1_t-51
x_best_beta_1_t-51
x_best_omega_t-51
x_best_mu_t-51
x_best_nu_tgarch_t-51
rv_t-50
volume_t-50
x_best_alpha_1_t-50
x_best_beta_1_t-50
x_best_omega_t-50
x_best_mu_t-50
x_best_nu_tgarch_t-50
rv_t-49
volume_t-49
x_best_alpha_1_t-49
x_best_beta_1_t-49
x_best_omega_t-49
x_best_mu_t-49
x_best_nu_tgarch_t-49
rv_t-48
volume_t-48
x_best_alpha_1_t-48
x_best_beta_1_t-48
x_best_omega_t-48
x_best_mu_t-48
x_best_nu_tgarch_t-48
rv_t-47
volume_t-47
x_best_alpha_1_t-47
x_best_beta_1_t-47
x_best_omega_t-47
x_best_mu_t-47
x_best_nu_tgarch_t-47
rv_t-46
volume_t-46
x_best_alpha_1_t-46
x_best_beta_1_t-46
x_best_omega_t-46
x_best_mu_t-46
x_best_nu_tgarch_t-46
rv_t-45
volume_t-45
x_best_alpha_1_t-45
x_best_beta_1_t-45
x_best_omega_t-45
x_best_mu_t-45
x_best_nu_tgarch_t-45
rv_t-44
volume_t-44
x_best_alpha_1_t-44
x_best_beta_1_t-44
x_best_omega_t-44
x_best_mu_t-44
x_best_nu_tgarch_t-44
rv_t-43
volume_t-43
x_best_alpha_1_t-43
x_best_beta_1_t-43
x_best_omega_t-43
x_best_mu_t-43
x_best_nu_tgarch_t-43
rv_t-42
volume_t-42
x_best_alpha_1_t-42
x_best_beta_1_t-42
x_best_omega_t-42
x_best_mu_t-42
x_best_nu_tgarch_t-42
rv_t-41
volume_t-41
x_best_alpha_1_t-41
x_best_beta_1_t-41
x_best_omega_t-41
x_best_mu_t-41
x_best_nu_tgarch_t-41
rv_t-40
volume_t-40
x_best_alpha_1_t-40
x_best_beta_1_t-40
x_best_omega_t-40
x_best_mu_t-40
x_best_nu_tgarch_t-40
rv_t-39
volume_t-39
x_best_alpha_1_t-39
x_best_beta_1_t-39
x_best_omega_t-39
x_best_mu_t-39
x_best_nu_tgarch_t-39
rv_t-38
volume_t-38
x_best_alpha_1_t-38
x_best_beta_1_t-38
x_best_omega_t-38
x_best_mu_t-38
x_best_nu_tgarch_t-38
rv_t-37
volume_t-37
x_best_alpha_1_t-37
x_best_beta_1_t-37
x_best_omega_t-37
x_best_mu_t-37
x_best_nu_tgarch_t-37
rv_t-36
volume_t-36
x_best_alpha_1_t-36
x_best_beta_1_t-36
x_best_omega_t-36
x_best_mu_t-36
x_best_nu_tgarch_t-36
rv_t-35
volume_t-35
x_best_alpha_1_t-35
x_best_beta_1_t-35
x_best_omega_t-35
x_best_mu_t-35
x_best_nu_tgarch_t-35
rv_t-34
volume_t-34
x_best_alpha_1_t-34
x_best_beta_1_t-34
x_best_omega_t-34
x_best_mu_t-34
x_best_nu_tgarch_t-34
rv_t-33
volume_t-33
x_best_alpha_1_t-33
x_best_beta_1_t-33
x_best_omega_t-33
x_best_mu_t-33
x_best_nu_tgarch_t-33
rv_t-32
volume_t-32
x_best_alpha_1_t-32
x_best_beta_1_t-32
x_best_omega_t-32
x_best_mu_t-32
x_best_nu_tgarch_t-32
rv_t-31
volume_t-31
x_best_alpha_1_t-31
x_best_beta_1_t-31
x_best_omega_t-31
x_best_mu_t-31
x_best_nu_tgarch_t-31
rv_t-30
volume_t-30
x_best_alpha_1_t-30
x_best_beta_1_t-30
x_best_omega_t-30
x_best_mu_t-30
x_best_nu_tgarch_t-30
rv_t-29
volume_t-29
x_best_alpha_1_t-29
x_best_beta_1_t-29
x_best_omega_t-29
x_best_mu_t-29
x_best_nu_tgarch_t-29
rv_t-28
volume_t-28
x_best_alpha_1_t-28
x_best_beta_1_t-28
x_best_omega_t-28
x_best_mu_t-28
x_best_nu_tgarch_t-28
rv_t-27
volume_t-27
x_best_alpha_1_t-27
x_best_beta_1_t-27
x_best_omega_t-27
x_best_mu_t-27
x_best_nu_tgarch_t-27
rv_t-26
volume_t-26
x_best_alpha_1_t-26
x_best_beta_1_t-26
x_best_omega_t-26
x_best_mu_t-26
x_best_nu_tgarch_t-26
rv_t-25
volume_t-25
x_best_alpha_1_t-25
x_best_beta_1_t-25
x_best_omega_t-25
x_best_mu_t-25
x_best_nu_tgarch_t-25
rv_t-24
volume_t-24
x_best_alpha_1_t-24
x_best_beta_1_t-24
x_best_omega_t-24
x_best_mu_t-24
x_best_nu_tgarch_t-24
rv_t-23
volume_t-23
x_best_alpha_1_t-23
x_best_beta_1_t-23
x_best_omega_t-23
x_best_mu_t-23
x_best_nu_tgarch_t-23
rv_t-22
volume_t-22
x_best_alpha_1_t-22
x_best_beta_1_t-22
x_best_omega_t-22
x_best_mu_t-22
x_best_nu_tgarch_t-22
rv_t-21
volume_t-21
x_best_alpha_1_t-21
x_best_beta_1_t-21
x_best_omega_t-21
x_best_mu_t-21
x_best_nu_tgarch_t-21
rv_t-20
volume_t-20
x_best_alpha_1_t-20
x_best_beta_1_t-20
x_best_omega_t-20
x_best_mu_t-20
x_best_nu_tgarch_t-20
rv_t-19
volume_t-19
x_best_alpha_1_t-19
x_best_beta_1_t-19
x_best_omega_t-19
x_best_mu_t-19
x_best_nu_tgarch_t-19
rv_t-18
volume_t-18
x_best_alpha_1_t-18
x_best_beta_1_t-18
x_best_omega_t-18
x_best_mu_t-18
x_best_nu_tgarch_t-18
rv_t-17
volume_t-17
x_best_alpha_1_t-17
x_best_beta_1_t-17
x_best_omega_t-17
x_best_mu_t-17
x_best_nu_tgarch_t-17
rv_t-16
volume_t-16
x_best_alpha_1_t-16
x_best_beta_1_t-16
x_best_omega_t-16
x_best_mu_t-16
x_best_nu_tgarch_t-16
rv_t-15
volume_t-15
x_best_alpha_1_t-15
x_best_beta_1_t-15
x_best_omega_t-15
x_best_mu_t-15
x_best_nu_tgarch_t-15
rv_t-14
volume_t-14
x_best_alpha_1_t-14
x_best_beta_1_t-14
x_best_omega_t-14
x_best_mu_t-14
x_best_nu_tgarch_t-14
rv_t-13
volume_t-13
x_best_alpha_1_t-13
x_best_beta_1_t-13
x_best_omega_t-13
x_best_mu_t-13
x_best_nu_tgarch_t-13
rv_t-12
volume_t-12
x_best_alpha_1_t-12
x_best_beta_1_t-12
x_best_omega_t-12
x_best_mu_t-12
x_best_nu_tgarch_t-12
rv_t-11
volume_t-11
x_best_alpha_1_t-11
x_best_beta_1_t-11
x_best_omega_t-11
x_best_mu_t-11
x_best_nu_tgarch_t-11
rv_t-10
volume_t-10
x_best_alpha_1_t-10
x_best_beta_1_t-10
x_best_omega_t-10
x_best_mu_t-10
x_best_nu_tgarch_t-10
rv_t-9
volume_t-9
x_best_alpha_1_t-9
x_best_beta_1_t-9
x_best_omega_t-9
x_best_mu_t-9
x_best_nu_tgarch_t-9
rv_t-8
volume_t-8
x_best_alpha_1_t-8
x_best_beta_1_t-8
x_best_omega_t-8
x_best_mu_t-8
x_best_nu_tgarch_t-8
rv_t-7
volume_t-7
x_best_alpha_1_t-7
x_best_beta_1_t-7
x_best_omega_t-7
x_best_mu_t-7
x_best_nu_tgarch_t-7
rv_t-6
volume_t-6
x_best_alpha_1_t-6
x_best_beta_1_t-6
x_best_omega_t-6
x_best_mu_t-6
x_best_nu_tgarch_t-6
rv_t-5
volume_t-5
x_best_alpha_1_t-5
x_best_beta_1_t-5
x_best_omega_t-5
x_best_mu_t-5
x_best_nu_tgarch_t-5
rv_t-4
volume_t-4
x_best_alpha_1_t-4
x_best_beta_1_t-4
x_best_omega_t-4
x_best_mu_t-4
x_best_nu_tgarch_t-4
rv_t-3
volume_t-3
x_best_alpha_1_t-3
x_best_beta_1_t-3
x_best_omega_t-3
x_best_mu_t-3
x_best_nu_tgarch_t-3
rv_t-2
volume_t-2
x_best_alpha_1_t-2
x_best_beta_1_t-2
x_best_omega_t-2
x_best_mu_t-2
x_best_nu_tgarch_t-2
rv_t-1
volume_t-1
x_best_alpha_1_t-1
x_best_beta_1_t-1
x_best_omega_t-1
x_best_mu_t-1
x_best_nu_tgarch_t-1
rv_t-0
volume_t-0
x_best_alpha_1_t-0
x_best_beta_1_t-0
x_best_omega_t-0
x_best_mu_t-0
x_best_nu_tgarch_t-0

 --- AAPL X_time columns: ---
x_time_dow_sin_t-59
x_time_dow_cos_t-59
x_time_dom_sin_t-59
x_time_dom_cos_t-59
x_time_doy_sin_t-59
x_time_doy_cos_t-59
x_time_woy_sin_t-59
x_time_woy_cos_t-59
x_time_moy_sin_t-59
x_time_moy_cos_t-59
x_time_is_quarter_end_t-59
x_time_years_since_2000_t-59
x_time_days_since_2000_t-59
x_time_months_since_2000_t-59
x_time_dow_sin_t-58
x_time_dow_cos_t-58
x_time_dom_sin_t-58
x_time_dom_cos_t-58
x_time_doy_sin_t-58
x_time_doy_cos_t-58
x_time_woy_sin_t-58
x_time_woy_cos_t-58
x_time_moy_sin_t-58
x_time_moy_cos_t-58
x_time_is_quarter_end_t-58
x_time_years_since_2000_t-58
x_time_days_since_2000_t-58
x_time_months_since_2000_t-58
x_time_dow_sin_t-57
x_time_dow_cos_t-57
x_time_dom_sin_t-57
x_time_dom_cos_t-57
x_time_doy_sin_t-57
x_time_doy_cos_t-57
x_time_woy_sin_t-57
x_time_woy_cos_t-57
x_time_moy_sin_t-57
x_time_moy_cos_t-57
x_time_is_quarter_end_t-57
x_time_years_since_2000_t-57
x_time_days_since_2000_t-57
x_time_months_since_2000_t-57
x_time_dow_sin_t-56
x_time_dow_cos_t-56
x_time_dom_sin_t-56
x_time_dom_cos_t-56
x_time_doy_sin_t-56
x_time_doy_cos_t-56
x_time_woy_sin_t-56
x_time_woy_cos_t-56
x_time_moy_sin_t-56
x_time_moy_cos_t-56
x_time_is_quarter_end_t-56
x_time_years_since_2000_t-56
x_time_days_since_2000_t-56
x_time_months_since_2000_t-56
x_time_dow_sin_t-55
x_time_dow_cos_t-55
x_time_dom_sin_t-55
x_time_dom_cos_t-55
x_time_doy_sin_t-55
x_time_doy_cos_t-55
x_time_woy_sin_t-55
x_time_woy_cos_t-55
x_time_moy_sin_t-55
x_time_moy_cos_t-55
x_time_is_quarter_end_t-55
x_time_years_since_2000_t-55
x_time_days_since_2000_t-55
x_time_months_since_2000_t-55
x_time_dow_sin_t-54
x_time_dow_cos_t-54
x_time_dom_sin_t-54
x_time_dom_cos_t-54
x_time_doy_sin_t-54
x_time_doy_cos_t-54
x_time_woy_sin_t-54
x_time_woy_cos_t-54
x_time_moy_sin_t-54
x_time_moy_cos_t-54
x_time_is_quarter_end_t-54
x_time_years_since_2000_t-54
x_time_days_since_2000_t-54
x_time_months_since_2000_t-54
x_time_dow_sin_t-53
x_time_dow_cos_t-53
x_time_dom_sin_t-53
x_time_dom_cos_t-53
x_time_doy_sin_t-53
x_time_doy_cos_t-53
x_time_woy_sin_t-53
x_time_woy_cos_t-53
x_time_moy_sin_t-53
x_time_moy_cos_t-53
x_time_is_quarter_end_t-53
x_time_years_since_2000_t-53
x_time_days_since_2000_t-53
x_time_months_since_2000_t-53
x_time_dow_sin_t-52
x_time_dow_cos_t-52
x_time_dom_sin_t-52
x_time_dom_cos_t-52
x_time_doy_sin_t-52
x_time_doy_cos_t-52
x_time_woy_sin_t-52
x_time_woy_cos_t-52
x_time_moy_sin_t-52
x_time_moy_cos_t-52
x_time_is_quarter_end_t-52
x_time_years_since_2000_t-52
x_time_days_since_2000_t-52
x_time_months_since_2000_t-52
x_time_dow_sin_t-51
x_time_dow_cos_t-51
x_time_dom_sin_t-51
x_time_dom_cos_t-51
x_time_doy_sin_t-51
x_time_doy_cos_t-51
x_time_woy_sin_t-51
x_time_woy_cos_t-51
x_time_moy_sin_t-51
x_time_moy_cos_t-51
x_time_is_quarter_end_t-51
x_time_years_since_2000_t-51
x_time_days_since_2000_t-51
x_time_months_since_2000_t-51
x_time_dow_sin_t-50
x_time_dow_cos_t-50
x_time_dom_sin_t-50
x_time_dom_cos_t-50
x_time_doy_sin_t-50
x_time_doy_cos_t-50
x_time_woy_sin_t-50
x_time_woy_cos_t-50
x_time_moy_sin_t-50
x_time_moy_cos_t-50
x_time_is_quarter_end_t-50
x_time_years_since_2000_t-50
x_time_days_since_2000_t-50
x_time_months_since_2000_t-50
x_time_dow_sin_t-49
x_time_dow_cos_t-49
x_time_dom_sin_t-49
x_time_dom_cos_t-49
x_time_doy_sin_t-49
x_time_doy_cos_t-49
x_time_woy_sin_t-49
x_time_woy_cos_t-49
x_time_moy_sin_t-49
x_time_moy_cos_t-49
x_time_is_quarter_end_t-49
x_time_years_since_2000_t-49
x_time_days_since_2000_t-49
x_time_months_since_2000_t-49
x_time_dow_sin_t-48
x_time_dow_cos_t-48
x_time_dom_sin_t-48
x_time_dom_cos_t-48
x_time_doy_sin_t-48
x_time_doy_cos_t-48
x_time_woy_sin_t-48
x_time_woy_cos_t-48
x_time_moy_sin_t-48
x_time_moy_cos_t-48
x_time_is_quarter_end_t-48
x_time_years_since_2000_t-48
x_time_days_since_2000_t-48
x_time_months_since_2000_t-48
x_time_dow_sin_t-47
x_time_dow_cos_t-47
x_time_dom_sin_t-47
x_time_dom_cos_t-47
x_time_doy_sin_t-47
x_time_doy_cos_t-47
x_time_woy_sin_t-47
x_time_woy_cos_t-47
x_time_moy_sin_t-47
x_time_moy_cos_t-47
x_time_is_quarter_end_t-47
x_time_years_since_2000_t-47
x_time_days_since_2000_t-47
x_time_months_since_2000_t-47
x_time_dow_sin_t-46
x_time_dow_cos_t-46
x_time_dom_sin_t-46
x_time_dom_cos_t-46
x_time_doy_sin_t-46
x_time_doy_cos_t-46
x_time_woy_sin_t-46
x_time_woy_cos_t-46
x_time_moy_sin_t-46
x_time_moy_cos_t-46
x_time_is_quarter_end_t-46
x_time_years_since_2000_t-46
x_time_days_since_2000_t-46
x_time_months_since_2000_t-46
x_time_dow_sin_t-45
x_time_dow_cos_t-45
x_time_dom_sin_t-45
x_time_dom_cos_t-45
x_time_doy_sin_t-45
x_time_doy_cos_t-45
x_time_woy_sin_t-45
x_time_woy_cos_t-45
x_time_moy_sin_t-45
x_time_moy_cos_t-45
x_time_is_quarter_end_t-45
x_time_years_since_2000_t-45
x_time_days_since_2000_t-45
x_time_months_since_2000_t-45
x_time_dow_sin_t-44
x_time_dow_cos_t-44
x_time_dom_sin_t-44
x_time_dom_cos_t-44
x_time_doy_sin_t-44
x_time_doy_cos_t-44
x_time_woy_sin_t-44
x_time_woy_cos_t-44
x_time_moy_sin_t-44
x_time_moy_cos_t-44
x_time_is_quarter_end_t-44
x_time_years_since_2000_t-44
x_time_days_since_2000_t-44
x_time_months_since_2000_t-44
x_time_dow_sin_t-43
x_time_dow_cos_t-43
x_time_dom_sin_t-43
x_time_dom_cos_t-43
x_time_doy_sin_t-43
x_time_doy_cos_t-43
x_time_woy_sin_t-43
x_time_woy_cos_t-43
x_time_moy_sin_t-43
x_time_moy_cos_t-43
x_time_is_quarter_end_t-43
x_time_years_since_2000_t-43
x_time_days_since_2000_t-43
x_time_months_since_2000_t-43
x_time_dow_sin_t-42
x_time_dow_cos_t-42
x_time_dom_sin_t-42
x_time_dom_cos_t-42
x_time_doy_sin_t-42
x_time_doy_cos_t-42
x_time_woy_sin_t-42
x_time_woy_cos_t-42
x_time_moy_sin_t-42
x_time_moy_cos_t-42
x_time_is_quarter_end_t-42
x_time_years_since_2000_t-42
x_time_days_since_2000_t-42
x_time_months_since_2000_t-42
x_time_dow_sin_t-41
x_time_dow_cos_t-41
x_time_dom_sin_t-41
x_time_dom_cos_t-41
x_time_doy_sin_t-41
x_time_doy_cos_t-41
x_time_woy_sin_t-41
x_time_woy_cos_t-41
x_time_moy_sin_t-41
x_time_moy_cos_t-41
x_time_is_quarter_end_t-41
x_time_years_since_2000_t-41
x_time_days_since_2000_t-41
x_time_months_since_2000_t-41
x_time_dow_sin_t-40
x_time_dow_cos_t-40
x_time_dom_sin_t-40
x_time_dom_cos_t-40
x_time_doy_sin_t-40
x_time_doy_cos_t-40
x_time_woy_sin_t-40
x_time_woy_cos_t-40
x_time_moy_sin_t-40
x_time_moy_cos_t-40
x_time_is_quarter_end_t-40
x_time_years_since_2000_t-40
x_time_days_since_2000_t-40
x_time_months_since_2000_t-40
x_time_dow_sin_t-39
x_time_dow_cos_t-39
x_time_dom_sin_t-39
x_time_dom_cos_t-39
x_time_doy_sin_t-39
x_time_doy_cos_t-39
x_time_woy_sin_t-39
x_time_woy_cos_t-39
x_time_moy_sin_t-39
x_time_moy_cos_t-39
x_time_is_quarter_end_t-39
x_time_years_since_2000_t-39
x_time_days_since_2000_t-39
x_time_months_since_2000_t-39
x_time_dow_sin_t-38
x_time_dow_cos_t-38
x_time_dom_sin_t-38
x_time_dom_cos_t-38
x_time_doy_sin_t-38
x_time_doy_cos_t-38
x_time_woy_sin_t-38
x_time_woy_cos_t-38
x_time_moy_sin_t-38
x_time_moy_cos_t-38
x_time_is_quarter_end_t-38
x_time_years_since_2000_t-38
x_time_days_since_2000_t-38
x_time_months_since_2000_t-38
x_time_dow_sin_t-37
x_time_dow_cos_t-37
x_time_dom_sin_t-37
x_time_dom_cos_t-37
x_time_doy_sin_t-37
x_time_doy_cos_t-37
x_time_woy_sin_t-37
x_time_woy_cos_t-37
x_time_moy_sin_t-37
x_time_moy_cos_t-37
x_time_is_quarter_end_t-37
x_time_years_since_2000_t-37
x_time_days_since_2000_t-37
x_time_months_since_2000_t-37
x_time_dow_sin_t-36
x_time_dow_cos_t-36
x_time_dom_sin_t-36
x_time_dom_cos_t-36
x_time_doy_sin_t-36
x_time_doy_cos_t-36
x_time_woy_sin_t-36
x_time_woy_cos_t-36
x_time_moy_sin_t-36
x_time_moy_cos_t-36
x_time_is_quarter_end_t-36
x_time_years_since_2000_t-36
x_time_days_since_2000_t-36
x_time_months_since_2000_t-36
x_time_dow_sin_t-35
x_time_dow_cos_t-35
x_time_dom_sin_t-35
x_time_dom_cos_t-35
x_time_doy_sin_t-35
x_time_doy_cos_t-35
x_time_woy_sin_t-35
x_time_woy_cos_t-35
x_time_moy_sin_t-35
x_time_moy_cos_t-35
x_time_is_quarter_end_t-35
x_time_years_since_2000_t-35
x_time_days_since_2000_t-35
x_time_months_since_2000_t-35
x_time_dow_sin_t-34
x_time_dow_cos_t-34
x_time_dom_sin_t-34
x_time_dom_cos_t-34
x_time_doy_sin_t-34
x_time_doy_cos_t-34
x_time_woy_sin_t-34
x_time_woy_cos_t-34
x_time_moy_sin_t-34
x_time_moy_cos_t-34
x_time_is_quarter_end_t-34
x_time_years_since_2000_t-34
x_time_days_since_2000_t-34
x_time_months_since_2000_t-34
x_time_dow_sin_t-33
x_time_dow_cos_t-33
x_time_dom_sin_t-33
x_time_dom_cos_t-33
x_time_doy_sin_t-33
x_time_doy_cos_t-33
x_time_woy_sin_t-33
x_time_woy_cos_t-33
x_time_moy_sin_t-33
x_time_moy_cos_t-33
x_time_is_quarter_end_t-33
x_time_years_since_2000_t-33
x_time_days_since_2000_t-33
x_time_months_since_2000_t-33
x_time_dow_sin_t-32
x_time_dow_cos_t-32
x_time_dom_sin_t-32
x_time_dom_cos_t-32
x_time_doy_sin_t-32
x_time_doy_cos_t-32
x_time_woy_sin_t-32
x_time_woy_cos_t-32
x_time_moy_sin_t-32
x_time_moy_cos_t-32
x_time_is_quarter_end_t-32
x_time_years_since_2000_t-32
x_time_days_since_2000_t-32
x_time_months_since_2000_t-32
x_time_dow_sin_t-31
x_time_dow_cos_t-31
x_time_dom_sin_t-31
x_time_dom_cos_t-31
x_time_doy_sin_t-31
x_time_doy_cos_t-31
x_time_woy_sin_t-31
x_time_woy_cos_t-31
x_time_moy_sin_t-31
x_time_moy_cos_t-31
x_time_is_quarter_end_t-31
x_time_years_since_2000_t-31
x_time_days_since_2000_t-31
x_time_months_since_2000_t-31
x_time_dow_sin_t-30
x_time_dow_cos_t-30
x_time_dom_sin_t-30
x_time_dom_cos_t-30
x_time_doy_sin_t-30
x_time_doy_cos_t-30
x_time_woy_sin_t-30
x_time_woy_cos_t-30
x_time_moy_sin_t-30
x_time_moy_cos_t-30
x_time_is_quarter_end_t-30
x_time_years_since_2000_t-30
x_time_days_since_2000_t-30
x_time_months_since_2000_t-30
x_time_dow_sin_t-29
x_time_dow_cos_t-29
x_time_dom_sin_t-29
x_time_dom_cos_t-29
x_time_doy_sin_t-29
x_time_doy_cos_t-29
x_time_woy_sin_t-29
x_time_woy_cos_t-29
x_time_moy_sin_t-29
x_time_moy_cos_t-29
x_time_is_quarter_end_t-29
x_time_years_since_2000_t-29
x_time_days_since_2000_t-29
x_time_months_since_2000_t-29
x_time_dow_sin_t-28
x_time_dow_cos_t-28
x_time_dom_sin_t-28
x_time_dom_cos_t-28
x_time_doy_sin_t-28
x_time_doy_cos_t-28
x_time_woy_sin_t-28
x_time_woy_cos_t-28
x_time_moy_sin_t-28
x_time_moy_cos_t-28
x_time_is_quarter_end_t-28
x_time_years_since_2000_t-28
x_time_days_since_2000_t-28
x_time_months_since_2000_t-28
x_time_dow_sin_t-27
x_time_dow_cos_t-27
x_time_dom_sin_t-27
x_time_dom_cos_t-27
x_time_doy_sin_t-27
x_time_doy_cos_t-27
x_time_woy_sin_t-27
x_time_woy_cos_t-27
x_time_moy_sin_t-27
x_time_moy_cos_t-27
x_time_is_quarter_end_t-27
x_time_years_since_2000_t-27
x_time_days_since_2000_t-27
x_time_months_since_2000_t-27
x_time_dow_sin_t-26
x_time_dow_cos_t-26
x_time_dom_sin_t-26
x_time_dom_cos_t-26
x_time_doy_sin_t-26
x_time_doy_cos_t-26
x_time_woy_sin_t-26
x_time_woy_cos_t-26
x_time_moy_sin_t-26
x_time_moy_cos_t-26
x_time_is_quarter_end_t-26
x_time_years_since_2000_t-26
x_time_days_since_2000_t-26
x_time_months_since_2000_t-26
x_time_dow_sin_t-25
x_time_dow_cos_t-25
x_time_dom_sin_t-25
x_time_dom_cos_t-25
x_time_doy_sin_t-25
x_time_doy_cos_t-25
x_time_woy_sin_t-25
x_time_woy_cos_t-25
x_time_moy_sin_t-25
x_time_moy_cos_t-25
x_time_is_quarter_end_t-25
x_time_years_since_2000_t-25
x_time_days_since_2000_t-25
x_time_months_since_2000_t-25
x_time_dow_sin_t-24
x_time_dow_cos_t-24
x_time_dom_sin_t-24
x_time_dom_cos_t-24
x_time_doy_sin_t-24
x_time_doy_cos_t-24
x_time_woy_sin_t-24
x_time_woy_cos_t-24
x_time_moy_sin_t-24
x_time_moy_cos_t-24
x_time_is_quarter_end_t-24
x_time_years_since_2000_t-24
x_time_days_since_2000_t-24
x_time_months_since_2000_t-24
x_time_dow_sin_t-23
x_time_dow_cos_t-23
x_time_dom_sin_t-23
x_time_dom_cos_t-23
x_time_doy_sin_t-23
x_time_doy_cos_t-23
x_time_woy_sin_t-23
x_time_woy_cos_t-23
x_time_moy_sin_t-23
x_time_moy_cos_t-23
x_time_is_quarter_end_t-23
x_time_years_since_2000_t-23
x_time_days_since_2000_t-23
x_time_months_since_2000_t-23
x_time_dow_sin_t-22
x_time_dow_cos_t-22
x_time_dom_sin_t-22
x_time_dom_cos_t-22
x_time_doy_sin_t-22
x_time_doy_cos_t-22
x_time_woy_sin_t-22
x_time_woy_cos_t-22
x_time_moy_sin_t-22
x_time_moy_cos_t-22
x_time_is_quarter_end_t-22
x_time_years_since_2000_t-22
x_time_days_since_2000_t-22
x_time_months_since_2000_t-22
x_time_dow_sin_t-21
x_time_dow_cos_t-21
x_time_dom_sin_t-21
x_time_dom_cos_t-21
x_time_doy_sin_t-21
x_time_doy_cos_t-21
x_time_woy_sin_t-21
x_time_woy_cos_t-21
x_time_moy_sin_t-21
x_time_moy_cos_t-21
x_time_is_quarter_end_t-21
x_time_years_since_2000_t-21
x_time_days_since_2000_t-21
x_time_months_since_2000_t-21
x_time_dow_sin_t-20
x_time_dow_cos_t-20
x_time_dom_sin_t-20
x_time_dom_cos_t-20
x_time_doy_sin_t-20
x_time_doy_cos_t-20
x_time_woy_sin_t-20
x_time_woy_cos_t-20
x_time_moy_sin_t-20
x_time_moy_cos_t-20
x_time_is_quarter_end_t-20
x_time_years_since_2000_t-20
x_time_days_since_2000_t-20
x_time_months_since_2000_t-20
x_time_dow_sin_t-19
x_time_dow_cos_t-19
x_time_dom_sin_t-19
x_time_dom_cos_t-19
x_time_doy_sin_t-19
x_time_doy_cos_t-19
x_time_woy_sin_t-19
x_time_woy_cos_t-19
x_time_moy_sin_t-19
x_time_moy_cos_t-19
x_time_is_quarter_end_t-19
x_time_years_since_2000_t-19
x_time_days_since_2000_t-19
x_time_months_since_2000_t-19
x_time_dow_sin_t-18
x_time_dow_cos_t-18
x_time_dom_sin_t-18
x_time_dom_cos_t-18
x_time_doy_sin_t-18
x_time_doy_cos_t-18
x_time_woy_sin_t-18
x_time_woy_cos_t-18
x_time_moy_sin_t-18
x_time_moy_cos_t-18
x_time_is_quarter_end_t-18
x_time_years_since_2000_t-18
x_time_days_since_2000_t-18
x_time_months_since_2000_t-18
x_time_dow_sin_t-17
x_time_dow_cos_t-17
x_time_dom_sin_t-17
x_time_dom_cos_t-17
x_time_doy_sin_t-17
x_time_doy_cos_t-17
x_time_woy_sin_t-17
x_time_woy_cos_t-17
x_time_moy_sin_t-17
x_time_moy_cos_t-17
x_time_is_quarter_end_t-17
x_time_years_since_2000_t-17
x_time_days_since_2000_t-17
x_time_months_since_2000_t-17
x_time_dow_sin_t-16
x_time_dow_cos_t-16
x_time_dom_sin_t-16
x_time_dom_cos_t-16
x_time_doy_sin_t-16
x_time_doy_cos_t-16
x_time_woy_sin_t-16
x_time_woy_cos_t-16
x_time_moy_sin_t-16
x_time_moy_cos_t-16
x_time_is_quarter_end_t-16
x_time_years_since_2000_t-16
x_time_days_since_2000_t-16
x_time_months_since_2000_t-16
x_time_dow_sin_t-15
x_time_dow_cos_t-15
x_time_dom_sin_t-15
x_time_dom_cos_t-15
x_time_doy_sin_t-15
x_time_doy_cos_t-15
x_time_woy_sin_t-15
x_time_woy_cos_t-15
x_time_moy_sin_t-15
x_time_moy_cos_t-15
x_time_is_quarter_end_t-15
x_time_years_since_2000_t-15
x_time_days_since_2000_t-15
x_time_months_since_2000_t-15
x_time_dow_sin_t-14
x_time_dow_cos_t-14
x_time_dom_sin_t-14
x_time_dom_cos_t-14
x_time_doy_sin_t-14
x_time_doy_cos_t-14
x_time_woy_sin_t-14
x_time_woy_cos_t-14
x_time_moy_sin_t-14
x_time_moy_cos_t-14
x_time_is_quarter_end_t-14
x_time_years_since_2000_t-14
x_time_days_since_2000_t-14
x_time_months_since_2000_t-14
x_time_dow_sin_t-13
x_time_dow_cos_t-13
x_time_dom_sin_t-13
x_time_dom_cos_t-13
x_time_doy_sin_t-13
x_time_doy_cos_t-13
x_time_woy_sin_t-13
x_time_woy_cos_t-13
x_time_moy_sin_t-13
x_time_moy_cos_t-13
x_time_is_quarter_end_t-13
x_time_years_since_2000_t-13
x_time_days_since_2000_t-13
x_time_months_since_2000_t-13
x_time_dow_sin_t-12
x_time_dow_cos_t-12
x_time_dom_sin_t-12
x_time_dom_cos_t-12
x_time_doy_sin_t-12
x_time_doy_cos_t-12
x_time_woy_sin_t-12
x_time_woy_cos_t-12
x_time_moy_sin_t-12
x_time_moy_cos_t-12
x_time_is_quarter_end_t-12
x_time_years_since_2000_t-12
x_time_days_since_2000_t-12
x_time_months_since_2000_t-12
x_time_dow_sin_t-11
x_time_dow_cos_t-11
x_time_dom_sin_t-11
x_time_dom_cos_t-11
x_time_doy_sin_t-11
x_time_doy_cos_t-11
x_time_woy_sin_t-11
x_time_woy_cos_t-11
x_time_moy_sin_t-11
x_time_moy_cos_t-11
x_time_is_quarter_end_t-11
x_time_years_since_2000_t-11
x_time_days_since_2000_t-11
x_time_months_since_2000_t-11
x_time_dow_sin_t-10
x_time_dow_cos_t-10
x_time_dom_sin_t-10
x_time_dom_cos_t-10
x_time_doy_sin_t-10
x_time_doy_cos_t-10
x_time_woy_sin_t-10
x_time_woy_cos_t-10
x_time_moy_sin_t-10
x_time_moy_cos_t-10
x_time_is_quarter_end_t-10
x_time_years_since_2000_t-10
x_time_days_since_2000_t-10
x_time_months_since_2000_t-10
x_time_dow_sin_t-9
x_time_dow_cos_t-9
x_time_dom_sin_t-9
x_time_dom_cos_t-9
x_time_doy_sin_t-9
x_time_doy_cos_t-9
x_time_woy_sin_t-9
x_time_woy_cos_t-9
x_time_moy_sin_t-9
x_time_moy_cos_t-9
x_time_is_quarter_end_t-9
x_time_years_since_2000_t-9
x_time_days_since_2000_t-9
x_time_months_since_2000_t-9
x_time_dow_sin_t-8
x_time_dow_cos_t-8
x_time_dom_sin_t-8
x_time_dom_cos_t-8
x_time_doy_sin_t-8
x_time_doy_cos_t-8
x_time_woy_sin_t-8
x_time_woy_cos_t-8
x_time_moy_sin_t-8
x_time_moy_cos_t-8
x_time_is_quarter_end_t-8
x_time_years_since_2000_t-8
x_time_days_since_2000_t-8
x_time_months_since_2000_t-8
x_time_dow_sin_t-7
x_time_dow_cos_t-7
x_time_dom_sin_t-7
x_time_dom_cos_t-7
x_time_doy_sin_t-7
x_time_doy_cos_t-7
x_time_woy_sin_t-7
x_time_woy_cos_t-7
x_time_moy_sin_t-7
x_time_moy_cos_t-7
x_time_is_quarter_end_t-7
x_time_years_since_2000_t-7
x_time_days_since_2000_t-7
x_time_months_since_2000_t-7
x_time_dow_sin_t-6
x_time_dow_cos_t-6
x_time_dom_sin_t-6
x_time_dom_cos_t-6
x_time_doy_sin_t-6
x_time_doy_cos_t-6
x_time_woy_sin_t-6
x_time_woy_cos_t-6
x_time_moy_sin_t-6
x_time_moy_cos_t-6
x_time_is_quarter_end_t-6
x_time_years_since_2000_t-6
x_time_days_since_2000_t-6
x_time_months_since_2000_t-6
x_time_dow_sin_t-5
x_time_dow_cos_t-5
x_time_dom_sin_t-5
x_time_dom_cos_t-5
x_time_doy_sin_t-5
x_time_doy_cos_t-5
x_time_woy_sin_t-5
x_time_woy_cos_t-5
x_time_moy_sin_t-5
x_time_moy_cos_t-5
x_time_is_quarter_end_t-5
x_time_years_since_2000_t-5
x_time_days_since_2000_t-5
x_time_months_since_2000_t-5
x_time_dow_sin_t-4
x_time_dow_cos_t-4
x_time_dom_sin_t-4
x_time_dom_cos_t-4
x_time_doy_sin_t-4
x_time_doy_cos_t-4
x_time_woy_sin_t-4
x_time_woy_cos_t-4
x_time_moy_sin_t-4
x_time_moy_cos_t-4
x_time_is_quarter_end_t-4
x_time_years_since_2000_t-4
x_time_days_since_2000_t-4
x_time_months_since_2000_t-4
x_time_dow_sin_t-3
x_time_dow_cos_t-3
x_time_dom_sin_t-3
x_time_dom_cos_t-3
x_time_doy_sin_t-3
x_time_doy_cos_t-3
x_time_woy_sin_t-3
x_time_woy_cos_t-3
x_time_moy_sin_t-3
x_time_moy_cos_t-3
x_time_is_quarter_end_t-3
x_time_years_since_2000_t-3
x_time_days_since_2000_t-3
x_time_months_since_2000_t-3
x_time_dow_sin_t-2
x_time_dow_cos_t-2
x_time_dom_sin_t-2
x_time_dom_cos_t-2
x_time_doy_sin_t-2
x_time_doy_cos_t-2
x_time_woy_sin_t-2
x_time_woy_cos_t-2
x_time_moy_sin_t-2
x_time_moy_cos_t-2
x_time_is_quarter_end_t-2
x_time_years_since_2000_t-2
x_time_days_since_2000_t-2
x_time_months_since_2000_t-2
x_time_dow_sin_t-1
x_time_dow_cos_t-1
x_time_dom_sin_t-1
x_time_dom_cos_t-1
x_time_doy_sin_t-1
x_time_doy_cos_t-1
x_time_woy_sin_t-1
x_time_woy_cos_t-1
x_time_moy_sin_t-1
x_time_moy_cos_t-1
x_time_is_quarter_end_t-1
x_time_years_since_2000_t-1
x_time_days_since_2000_t-1
x_time_months_since_2000_t-1
x_time_dow_sin_t-0
x_time_dow_cos_t-0
x_time_dom_sin_t-0
x_time_dom_cos_t-0
x_time_doy_sin_t-0
x_time_doy_cos_t-0
x_time_woy_sin_t-0
x_time_woy_cos_t-0
x_time_moy_sin_t-0
x_time_moy_cos_t-0
x_time_is_quarter_end_t-0
x_time_years_since_2000_t-0
x_time_days_since_2000_t-0
x_time_months_since_2000_t-0

 ---AAPL y columns:---
rv_t+1
lg_return_t+1
y_best_alpha_1_t+1
y_best_beta_1_t+1
y_best_omega_t+1
y_best_mu_t+1
y_best_nu_tgarch_t+1
rv_t+2
lg_return_t+2
y_best_alpha_1_t+2
y_best_beta_1_t+2
y_best_omega_t+2
y_best_mu_t+2
y_best_nu_tgarch_t+2
rv_t+3
lg_return_t+3
y_best_alpha_1_t+3
y_best_beta_1_t+3
y_best_omega_t+3
y_best_mu_t+3
y_best_nu_tgarch_t+3
rv_t+4
lg_return_t+4
y_best_alpha_1_t+4
y_best_beta_1_t+4
y_best_omega_t+4
y_best_mu_t+4
y_best_nu_tgarch_t+4
rv_t+5
lg_return_t+5
y_best_alpha_1_t+5
y_best_beta_1_t+5
y_best_omega_t+5
y_best_mu_t+5
y_best_nu_tgarch_t+5
rv_t+6
lg_return_t+6
y_best_alpha_1_t+6
y_best_beta_1_t+6
y_best_omega_t+6
y_best_mu_t+6
y_best_nu_tgarch_t+6
rv_t+7
lg_return_t+7
y_best_alpha_1_t+7
y_best_beta_1_t+7
y_best_omega_t+7
y_best_mu_t+7
y_best_nu_tgarch_t+7
rv_t+8
lg_return_t+8
y_best_alpha_1_t+8
y_best_beta_1_t+8
y_best_omega_t+8
y_best_mu_t+8
y_best_nu_tgarch_t+8
rv_t+9
lg_return_t+9
y_best_alpha_1_t+9
y_best_beta_1_t+9
y_best_omega_t+9
y_best_mu_t+9
y_best_nu_tgarch_t+9
rv_t+10
lg_return_t+10
y_best_alpha_1_t+10
y_best_beta_1_t+10
y_best_omega_t+10
y_best_mu_t+10
y_best_nu_tgarch_t+10
rv_t+11
lg_return_t+11
y_best_alpha_1_t+11
y_best_beta_1_t+11
y_best_omega_t+11
y_best_mu_t+11
y_best_nu_tgarch_t+11
rv_t+12
lg_return_t+12
y_best_alpha_1_t+12
y_best_beta_1_t+12
y_best_omega_t+12
y_best_mu_t+12
y_best_nu_tgarch_t+12
rv_t+13
lg_return_t+13
y_best_alpha_1_t+13
y_best_beta_1_t+13
y_best_omega_t+13
y_best_mu_t+13
y_best_nu_tgarch_t+13
rv_t+14
lg_return_t+14
y_best_alpha_1_t+14
y_best_beta_1_t+14
y_best_omega_t+14
y_best_mu_t+14
y_best_nu_tgarch_t+14
rv_t+15
lg_return_t+15
y_best_alpha_1_t+15
y_best_beta_1_t+15
y_best_omega_t+15
y_best_mu_t+15
y_best_nu_tgarch_t+15
rv_t+16
lg_return_t+16
y_best_alpha_1_t+16
y_best_beta_1_t+16
y_best_omega_t+16
y_best_mu_t+16
y_best_nu_tgarch_t+16
rv_t+17
lg_return_t+17
y_best_alpha_1_t+17
y_best_beta_1_t+17
y_best_omega_t+17
y_best_mu_t+17
y_best_nu_tgarch_t+17
rv_t+18
lg_return_t+18
y_best_alpha_1_t+18
y_best_beta_1_t+18
y_best_omega_t+18
y_best_mu_t+18
y_best_nu_tgarch_t+18
rv_t+19
lg_return_t+19
y_best_alpha_1_t+19
y_best_beta_1_t+19
y_best_omega_t+19
y_best_mu_t+19
y_best_nu_tgarch_t+19
rv_t+20
lg_return_t+20
y_best_alpha_1_t+20
y_best_beta_1_t+20
y_best_omega_t+20
y_best_mu_t+20
y_best_nu_tgarch_t+20
rv_t+21
lg_return_t+21
y_best_alpha_1_t+21
y_best_beta_1_t+21
y_best_omega_t+21
y_best_mu_t+21
y_best_nu_tgarch_t+21
rv_t+22
lg_return_t+22
y_best_alpha_1_t+22
y_best_beta_1_t+22
y_best_omega_t+22
y_best_mu_t+22
y_best_nu_tgarch_t+22
rv_t+23
lg_return_t+23
y_best_alpha_1_t+23
y_best_beta_1_t+23
y_best_omega_t+23
y_best_mu_t+23
y_best_nu_tgarch_t+23
rv_t+24
lg_return_t+24
y_best_alpha_1_t+24
y_best_beta_1_t+24
y_best_omega_t+24
y_best_mu_t+24
y_best_nu_tgarch_t+24
rv_t+25
lg_return_t+25
y_best_alpha_1_t+25
y_best_beta_1_t+25
y_best_omega_t+25
y_best_mu_t+25
y_best_nu_tgarch_t+25
rv_t+26
lg_return_t+26
y_best_alpha_1_t+26
y_best_beta_1_t+26
y_best_omega_t+26
y_best_mu_t+26
y_best_nu_tgarch_t+26
rv_t+27
lg_return_t+27
y_best_alpha_1_t+27
y_best_beta_1_t+27
y_best_omega_t+27
y_best_mu_t+27
y_best_nu_tgarch_t+27
rv_t+28
lg_return_t+28
y_best_alpha_1_t+28
y_best_beta_1_t+28
y_best_omega_t+28
y_best_mu_t+28
y_best_nu_tgarch_t+28
rv_t+29
lg_return_t+29
y_best_alpha_1_t+29
y_best_beta_1_t+29
y_best_omega_t+29
y_best_mu_t+29
y_best_nu_tgarch_t+29
rv_t+30
lg_return_t+30
y_best_alpha_1_t+30
y_best_beta_1_t+30
y_best_omega_t+30
y_best_mu_t+30
y_best_nu_tgarch_t+30
rv_t+31
lg_return_t+31
y_best_alpha_1_t+31
y_best_beta_1_t+31
y_best_omega_t+31
y_best_mu_t+31
y_best_nu_tgarch_t+31
rv_t+32
lg_return_t+32
y_best_alpha_1_t+32
y_best_beta_1_t+32
y_best_omega_t+32
y_best_mu_t+32
y_best_nu_tgarch_t+32
rv_t+33
lg_return_t+33
y_best_alpha_1_t+33
y_best_beta_1_t+33
y_best_omega_t+33
y_best_mu_t+33
y_best_nu_tgarch_t+33
rv_t+34
lg_return_t+34
y_best_alpha_1_t+34
y_best_beta_1_t+34
y_best_omega_t+34
y_best_mu_t+34
y_best_nu_tgarch_t+34
rv_t+35
lg_return_t+35
y_best_alpha_1_t+35
y_best_beta_1_t+35
y_best_omega_t+35
y_best_mu_t+35
y_best_nu_tgarch_t+35
rv_t+36
lg_return_t+36
y_best_alpha_1_t+36
y_best_beta_1_t+36
y_best_omega_t+36
y_best_mu_t+36
y_best_nu_tgarch_t+36
rv_t+37
lg_return_t+37
y_best_alpha_1_t+37
y_best_beta_1_t+37
y_best_omega_t+37
y_best_mu_t+37
y_best_nu_tgarch_t+37
rv_t+38
lg_return_t+38
y_best_alpha_1_t+38
y_best_beta_1_t+38
y_best_omega_t+38
y_best_mu_t+38
y_best_nu_tgarch_t+38
rv_t+39
lg_return_t+39
y_best_alpha_1_t+39
y_best_beta_1_t+39
y_best_omega_t+39
y_best_mu_t+39
y_best_nu_tgarch_t+39
rv_t+40
lg_return_t+40
y_best_alpha_1_t+40
y_best_beta_1_t+40
y_best_omega_t+40
y_best_mu_t+40
y_best_nu_tgarch_t+40
rv_t+41
lg_return_t+41
y_best_alpha_1_t+41
y_best_beta_1_t+41
y_best_omega_t+41
y_best_mu_t+41
y_best_nu_tgarch_t+41
rv_t+42
lg_return_t+42
y_best_alpha_1_t+42
y_best_beta_1_t+42
y_best_omega_t+42
y_best_mu_t+42
y_best_nu_tgarch_t+42
rv_t+43
lg_return_t+43
y_best_alpha_1_t+43
y_best_beta_1_t+43
y_best_omega_t+43
y_best_mu_t+43
y_best_nu_tgarch_t+43
rv_t+44
lg_return_t+44
y_best_alpha_1_t+44
y_best_beta_1_t+44
y_best_omega_t+44
y_best_mu_t+44
y_best_nu_tgarch_t+44
rv_t+45
lg_return_t+45
y_best_alpha_1_t+45
y_best_beta_1_t+45
y_best_omega_t+45
y_best_mu_t+45
y_best_nu_tgarch_t+45
rv_t+46
lg_return_t+46
y_best_alpha_1_t+46
y_best_beta_1_t+46
y_best_omega_t+46
y_best_mu_t+46
y_best_nu_tgarch_t+46
rv_t+47
lg_return_t+47
y_best_alpha_1_t+47
y_best_beta_1_t+47
y_best_omega_t+47
y_best_mu_t+47
y_best_nu_tgarch_t+47
rv_t+48
lg_return_t+48
y_best_alpha_1_t+48
y_best_beta_1_t+48
y_best_omega_t+48
y_best_mu_t+48
y_best_nu_tgarch_t+48
rv_t+49
lg_return_t+49
y_best_alpha_1_t+49
y_best_beta_1_t+49
y_best_omega_t+49
y_best_mu_t+49
y_best_nu_tgarch_t+49
rv_t+50
lg_return_t+50
y_best_alpha_1_t+50
y_best_beta_1_t+50
y_best_omega_t+50
y_best_mu_t+50
y_best_nu_tgarch_t+50
rv_t+51
lg_return_t+51
y_best_alpha_1_t+51
y_best_beta_1_t+51
y_best_omega_t+51
y_best_mu_t+51
y_best_nu_tgarch_t+51
rv_t+52
lg_return_t+52
y_best_alpha_1_t+52
y_best_beta_1_t+52
y_best_omega_t+52
y_best_mu_t+52
y_best_nu_tgarch_t+52
rv_t+53
lg_return_t+53
y_best_alpha_1_t+53
y_best_beta_1_t+53
y_best_omega_t+53
y_best_mu_t+53
y_best_nu_tgarch_t+53
rv_t+54
lg_return_t+54
y_best_alpha_1_t+54
y_best_beta_1_t+54
y_best_omega_t+54
y_best_mu_t+54
y_best_nu_tgarch_t+54
rv_t+55
lg_return_t+55
y_best_alpha_1_t+55
y_best_beta_1_t+55
y_best_omega_t+55
y_best_mu_t+55
y_best_nu_tgarch_t+55
rv_t+56
lg_return_t+56
y_best_alpha_1_t+56
y_best_beta_1_t+56
y_best_omega_t+56
y_best_mu_t+56
y_best_nu_tgarch_t+56
rv_t+57
lg_return_t+57
y_best_alpha_1_t+57
y_best_beta_1_t+57
y_best_omega_t+57
y_best_mu_t+57
y_best_nu_tgarch_t+57
rv_t+58
lg_return_t+58
y_best_alpha_1_t+58
y_best_beta_1_t+58
y_best_omega_t+58
y_best_mu_t+58
y_best_nu_tgarch_t+58
rv_t+59
lg_return_t+59
y_best_alpha_1_t+59
y_best_beta_1_t+59
y_best_omega_t+59
y_best_mu_t+59
y_best_nu_tgarch_t+59
rv_t+60
lg_return_t+60
y_best_alpha_1_t+60
y_best_beta_1_t+60
y_best_omega_t+60
y_best_mu_t+60
y_best_nu_tgarch_t+60
Checking aapl_price:
Shape: (2018, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  357020901.0
  Min value:  -0.166803569186452
Checking aapl_time:
Shape: (2018, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking aapl_y:
Shape: (2018, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  18807.929648208163
  Min value:  -135.29444775864093

 shape(X) for AAPL:
(2018, 60, 7)

 X for AAPL:
[[[6.76504706e-01 3.39139590e+07 1.64639200e-01 ... 2.22700829e-01
   9.08427206e-02 3.42649277e+00]
  [1.05204846e+00 3.62626110e+07 1.65286883e-01 ... 2.57627872e-01
   1.02183271e-01 3.41951773e+00]
  [6.63962853e-01 2.67453480e+07 1.63020134e-01 ... 2.56669021e-01
   1.08512804e-01 3.44750609e+00]
  ...
  [5.64998964e-01 2.25292560e+07 2.01842972e-01 ... 3.08889996e-01
   1.21153897e-01 2.95237762e+00]
  [3.58117505e-01 2.25897220e+07 2.06529393e-01 ... 3.19430141e-01
   1.15773904e-01 2.86080882e+00]
  [4.32127322e-01 2.15473710e+07 2.02593361e-01 ... 2.98567078e-01
   1.12065441e-01 2.87163649e+00]]

 [[1.05204846e+00 3.62626110e+07 1.65286883e-01 ... 2.57627872e-01
   1.02183271e-01 3.41951773e+00]
  [6.63962853e-01 2.67453480e+07 1.63020134e-01 ... 2.56669021e-01
   1.08512804e-01 3.44750609e+00]
  [8.86629800e-01 2.97761640e+07 1.63411349e-01 ... 2.43772670e-01
   1.09771755e-01 3.42531078e+00]
  ...
  [3.58117505e-01 2.25897220e+07 2.06529393e-01 ... 3.19430141e-01
   1.15773904e-01 2.86080882e+00]
  [4.32127322e-01 2.15473710e+07 2.02593361e-01 ... 2.98567078e-01
   1.12065441e-01 2.87163649e+00]
  [4.41486834e-01 2.08974510e+07 2.06604778e-01 ... 2.95875266e-01
   1.11179457e-01 2.85269742e+00]]

 [[6.63962853e-01 2.67453480e+07 1.63020134e-01 ... 2.56669021e-01
   1.08512804e-01 3.44750609e+00]
  [8.86629800e-01 2.97761640e+07 1.63411349e-01 ... 2.43772670e-01
   1.09771755e-01 3.42531078e+00]
  [9.15033653e-01 2.91556670e+07 1.62962724e-01 ... 2.40149969e-01
   1.12142138e-01 3.40853826e+00]
  ...
  [4.32127322e-01 2.15473710e+07 2.02593361e-01 ... 2.98567078e-01
   1.12065441e-01 2.87163649e+00]
  [4.41486834e-01 2.08974510e+07 2.06604778e-01 ... 2.95875266e-01
   1.11179457e-01 2.85269742e+00]
  [1.36512504e+00 2.90532960e+07 2.04656453e-01 ... 2.93698805e-01
   1.13288622e-01 2.86650287e+00]]

 ...

 [[5.06469051e+00 4.96307250e+07 1.16562350e-01 ... 1.83034562e+00
   1.23134418e-01 4.59207242e+00]
  [2.68596134e+00 3.94352940e+07 1.14335186e-01 ... 1.82517375e+00
   1.17654233e-01 4.63767933e+00]
  [3.22404246e+00 3.98319690e+07 1.13544083e-01 ... 1.81937648e+00
   1.13756999e-01 4.63165513e+00]
  ...
  [3.08638235e+01 1.25908914e+08 4.71686893e-02 ... 2.25135409e-01
   1.90740443e-01 3.31304220e+00]
  [8.46587393e+01 1.60466286e+08 2.71731667e-01 ... 2.59116206e+00
   1.98313673e-01 3.31323607e+00]
  [2.67920976e+01 1.20853216e+08 2.89938370e-01 ... 2.53524775e+00
   1.95548084e-01 3.35768112e+00]]

 [[2.68596134e+00 3.94352940e+07 1.14335186e-01 ... 1.82517375e+00
   1.17654233e-01 4.63767933e+00]
  [3.22404246e+00 3.98319690e+07 1.13544083e-01 ... 1.81937648e+00
   1.13756999e-01 4.63165513e+00]
  [3.34604999e+00 7.17590520e+07 1.09884175e-01 ... 1.82818521e+00
   1.20829087e-01 4.70491215e+00]
  ...
  [8.46587393e+01 1.60466286e+08 2.71731667e-01 ... 2.59116206e+00
   1.98313673e-01 3.31323607e+00]
  [2.67920976e+01 1.20853216e+08 2.89938370e-01 ... 2.53524775e+00
   1.95548084e-01 3.35768112e+00]
  [1.12407504e+02 1.84386792e+08 3.23695156e-01 ... 2.19907159e+00
   2.02372852e-01 3.32020312e+00]]

 [[3.22404246e+00 3.98319690e+07 1.13544083e-01 ... 1.81937648e+00
   1.13756999e-01 4.63165513e+00]
  [3.34604999e+00 7.17590520e+07 1.09884175e-01 ... 1.82818521e+00
   1.20829087e-01 4.70491215e+00]
  [1.80367858e+00 6.84883010e+07 1.35468567e-01 ... 1.87419340e+00
   1.22216999e-01 4.45644413e+00]
  ...
  [2.67920976e+01 1.20853216e+08 2.89938370e-01 ... 2.53524775e+00
   1.95548084e-01 3.35768112e+00]
  [1.12407504e+02 1.84386792e+08 3.23695156e-01 ... 2.19907159e+00
   2.02372852e-01 3.32020312e+00]
  [3.09586134e+01 1.21879981e+08 2.61815546e-01 ... 9.34619600e-01
   2.16608185e-01 3.20569311e+00]]]

 shape(Time) for AAPL:
(2018, 60, 14)

 Time for AAPL:
[[[-4.33883739e-01 -9.00968868e-01  9.37752132e-01 ...  0.00000000e+00
    0.00000000e+00  0.00000000e+00]
  [ 0.00000000e+00  1.00000000e+00  9.68077119e-01 ...  0.00000000e+00
    9.65872505e-04  0.00000000e+00]
  [ 7.81831482e-01  6.23489802e-01  8.97804540e-01 ...  0.00000000e+00
    1.28783001e-03  0.00000000e+00]
  ...
  [ 4.33883739e-01 -9.00968868e-01 -2.01298520e-01 ...  0.00000000e+00
    2.67224726e-02  1.25461255e-01]
  [-4.33883739e-01 -9.00968868e-01 -2.44929360e-16 ...  0.00000000e+00
    2.70444301e-02  1.25461255e-01]
  [ 0.00000000e+00  1.00000000e+00  5.71268215e-01 ...  0.00000000e+00
    2.80103026e-02  1.88191882e-01]]

 [[ 0.00000000e+00  1.00000000e+00  9.68077119e-01 ...  0.00000000e+00
    9.65872505e-04  0.00000000e+00]
  [ 7.81831482e-01  6.23489802e-01  8.97804540e-01 ...  0.00000000e+00
    1.28783001e-03  0.00000000e+00]
  [ 9.74927912e-01 -2.22520934e-01  7.90775737e-01 ...  0.00000000e+00
    1.60978751e-03  0.00000000e+00]
  ...
  [-4.33883739e-01 -9.00968868e-01 -2.44929360e-16 ...  0.00000000e+00
    2.70444301e-02  1.25461255e-01]
  [ 0.00000000e+00  1.00000000e+00  5.71268215e-01 ...  0.00000000e+00
    2.80103026e-02  1.88191882e-01]
  [ 7.81831482e-01  6.23489802e-01  7.24792787e-01 ...  0.00000000e+00
    2.83322601e-02  1.88191882e-01]]

 [[ 7.81831482e-01  6.23489802e-01  8.97804540e-01 ...  0.00000000e+00
    1.28783001e-03  0.00000000e+00]
  [ 9.74927912e-01 -2.22520934e-01  7.90775737e-01 ...  0.00000000e+00
    1.60978751e-03  0.00000000e+00]
  [ 4.33883739e-01 -9.00968868e-01  6.51372483e-01 ...  0.00000000e+00
    1.93174501e-03  0.00000000e+00]
  ...
  [ 0.00000000e+00  1.00000000e+00  5.71268215e-01 ...  0.00000000e+00
    2.80103026e-02  1.88191882e-01]
  [ 7.81831482e-01  6.23489802e-01  7.24792787e-01 ...  0.00000000e+00
    2.83322601e-02  1.88191882e-01]
  [ 9.74927912e-01 -2.22520934e-01  8.48644257e-01 ...  0.00000000e+00
    2.86542176e-02  1.88191882e-01]]

 ...

 [[ 0.00000000e+00  1.00000000e+00  4.85301963e-01 ...  1.00000000e+00
    9.43013522e-01  2.95202952e-02]
  [ 7.81831482e-01  6.23489802e-01  2.99363123e-01 ...  1.00000000e+00
    9.43335480e-01  2.95202952e-02]
  [ 9.74927912e-01 -2.22520934e-01  1.01168322e-01 ...  1.00000000e+00
    9.43657437e-01  2.95202952e-02]
  ...
  [-4.33883739e-01 -9.00968868e-01  7.24792787e-01 ...  1.00000000e+00
    9.69092080e-01  3.06273063e-01]
  [ 0.00000000e+00  1.00000000e+00  9.88468324e-01 ...  1.00000000e+00
    9.70057952e-01  3.06273063e-01]
  [ 7.81831482e-01  6.23489802e-01  9.98716507e-01 ...  1.00000000e+00
    9.70379910e-01  3.06273063e-01]]

 [[ 7.81831482e-01  6.23489802e-01  2.99363123e-01 ...  1.00000000e+00
    9.43335480e-01  2.95202952e-02]
  [ 9.74927912e-01 -2.22520934e-01  1.01168322e-01 ...  1.00000000e+00
    9.43657437e-01  2.95202952e-02]
  [ 4.33883739e-01 -9.00968868e-01 -1.01168322e-01 ...  1.00000000e+00
    9.43979395e-01  2.95202952e-02]
  ...
  [ 0.00000000e+00  1.00000000e+00  9.88468324e-01 ...  1.00000000e+00
    9.70057952e-01  3.06273063e-01]
  [ 7.81831482e-01  6.23489802e-01  9.98716507e-01 ...  1.00000000e+00
    9.70379910e-01  3.06273063e-01]
  [ 9.74927912e-01 -2.22520934e-01  9.68077119e-01 ...  1.00000000e+00
    9.70701867e-01  3.06273063e-01]]

 [[ 9.74927912e-01 -2.22520934e-01  1.01168322e-01 ...  1.00000000e+00
    9.43657437e-01  2.95202952e-02]
  [ 4.33883739e-01 -9.00968868e-01 -1.01168322e-01 ...  1.00000000e+00
    9.43979395e-01  2.95202952e-02]
  [-4.33883739e-01 -9.00968868e-01 -2.99363123e-01 ...  1.00000000e+00
    9.44301352e-01  2.95202952e-02]
  ...
  [ 7.81831482e-01  6.23489802e-01  9.98716507e-01 ...  1.00000000e+00
    9.70379910e-01  3.06273063e-01]
  [ 9.74927912e-01 -2.22520934e-01  9.68077119e-01 ...  1.00000000e+00
    9.70701867e-01  3.06273063e-01]
  [ 4.33883739e-01 -9.00968868e-01  8.97804540e-01 ...  1.00000000e+00
    9.71023825e-01  3.06273063e-01]]]

 shape(Y) for AAPL:
(2018, 60, 7)

 Y for AAPL:
[[[ 4.41486834e-01  7.41848308e-01  1.88166974e-01 ...  3.01510467e-01
    1.13948224e-01  3.07411399e+00]
  [ 1.36512504e+00 -5.19409735e-01  1.83240115e-01 ...  2.95823997e-01
    1.16869956e-01  3.08190592e+00]
  [ 8.66251987e-01 -2.50278217e-01  1.78770944e-01 ...  2.81516914e-01
    1.16380525e-01  3.16053146e+00]
  ...
  [ 1.41220190e+00 -3.14960890e-01  2.08036115e-01 ...  3.14029331e-01
    1.15914948e-01  2.88032185e+00]
  [ 1.56907991e+00 -1.44364448e+00  1.99235095e-01 ...  3.16129285e-01
    1.21747294e-01  2.92337593e+00]
  [ 3.81369644e+00  1.45050202e+00  1.96403318e-01 ...  3.07975012e-01
    1.24363615e-01  2.92378402e+00]]

 [[ 1.36512504e+00 -5.19409735e-01  1.83240115e-01 ...  2.95823997e-01
    1.16869956e-01  3.08190592e+00]
  [ 8.66251987e-01 -2.50278217e-01  1.78770944e-01 ...  2.81516914e-01
    1.16380525e-01  3.16053146e+00]
  [ 6.32251338e-01 -2.22996608e-01  1.80024678e-01 ...  2.88854204e-01
    1.08825694e-01  3.08464777e+00]
  ...
  [ 1.56907991e+00 -1.44364448e+00  1.99235095e-01 ...  3.16129285e-01
    1.21747294e-01  2.92337593e+00]
  [ 3.81369644e+00  1.45050202e+00  1.96403318e-01 ...  3.07975012e-01
    1.24363615e-01  2.92378402e+00]
  [ 1.95125755e+00 -1.48529552e+00  1.97948067e-01 ...  3.16598413e-01
    1.29260535e-01  2.89330494e+00]]

 [[ 8.66251987e-01 -2.50278217e-01  1.78770944e-01 ...  2.81516914e-01
    1.16380525e-01  3.16053146e+00]
  [ 6.32251338e-01 -2.22996608e-01  1.80024678e-01 ...  2.88854204e-01
    1.08825694e-01  3.08464777e+00]
  [ 4.93781630e-01 -1.18669519e-01  1.81208451e-01 ...  2.99632060e-01
    1.23042324e-01  2.98320687e+00]
  ...
  [ 3.81369644e+00  1.45050202e+00  1.96403318e-01 ...  3.07975012e-01
    1.24363615e-01  2.92378402e+00]
  [ 1.95125755e+00 -1.48529552e+00  1.97948067e-01 ...  3.16598413e-01
    1.29260535e-01  2.89330494e+00]
  [ 1.69170531e+00  2.36357427e-01  2.04112876e-01 ...  3.53462987e-01
    1.26904646e-01  2.92913158e+00]]

 ...

 [[ 1.12407504e+02  1.42617411e+01  2.46518753e-01 ...  7.49091848e-01
    2.34520318e-01  3.04392012e+00]
  [ 3.09586134e+01 -4.33186128e+00  2.56066556e-01 ...  7.74816906e-01
    2.41687578e-01  2.95369362e+00]
  [ 1.62449203e+01  3.97921608e+00  2.51671100e-01 ...  8.06168063e-01
    2.39529877e-01  2.99872318e+00]
  ...
  [ 2.76021631e+00  2.19872751e+00  1.97787490e-01 ...  7.40407270e-01
    1.82203879e-01  3.04172446e+00]
  [ 1.76195374e+00  5.21140173e-01  1.88880007e-01 ...  7.02716465e-01
    1.79924591e-01  3.15133418e+00]
  [ 2.61004026e+00 -1.70015902e+00  1.88020982e-01 ...  6.67213086e-01
    1.75567247e-01  3.25255026e+00]]

 [[ 3.09586134e+01 -4.33186128e+00  2.56066556e-01 ...  7.74816906e-01
    2.41687578e-01  2.95369362e+00]
  [ 1.62449203e+01  3.97921608e+00  2.51671100e-01 ...  8.06168063e-01
    2.39529877e-01  2.99872318e+00]
  [ 5.49327509e+01  2.18143275e+00  2.68561502e-01 ...  8.83130950e-01
    2.43072282e-01  2.99819484e+00]
  ...
  [ 1.76195374e+00  5.21140173e-01  1.88880007e-01 ...  7.02716465e-01
    1.79924591e-01  3.15133418e+00]
  [ 2.61004026e+00 -1.70015902e+00  1.88020982e-01 ...  6.67213086e-01
    1.75567247e-01  3.25255026e+00]
  [ 1.93572910e+00  2.85741501e-02  1.88020982e-01 ...  6.67213086e-01
    1.75567247e-01  3.25255026e+00]]

 [[ 1.62449203e+01  3.97921608e+00  2.51671100e-01 ...  8.06168063e-01
    2.39529877e-01  2.99872318e+00]
  [ 5.49327509e+01  2.18143275e+00  2.68561502e-01 ...  8.83130950e-01
    2.43072282e-01  2.99819484e+00]
  [ 6.46699860e+00 -1.87812046e-01  2.66459692e-01 ...  8.44436372e-01
    2.50342693e-01  2.84838905e+00]
  ...
  [ 2.61004026e+00 -1.70015902e+00  1.88020982e-01 ...  6.67213086e-01
    1.75567247e-01  3.25255026e+00]
  [ 1.93572910e+00  2.85741501e-02  1.88020982e-01 ...  6.67213086e-01
    1.75567247e-01  3.25255026e+00]
  [ 1.71835643e+00  5.36627193e-01  1.88020982e-01 ...  6.67213086e-01
    1.75567247e-01  3.25255026e+00]]]

Some additional tests to check how the data is output with just one target variable¶

In [92]:
# Sanity check: rebuild the AAPL tensors with ONLY the realized-volatility target
# (rv_y=True) and no GARCH-variance / best-parameter features, then verify the
# resulting y tensor contains no NaN/inf values and stays in a plausible range.
# NOTE(review): df_to_raw_X_y_dictionary and check_data_issues are defined in
# earlier cells of this notebook.
additional_test_data = df_to_raw_X_y_dictionary(df_dict["AAPL"], ticker="AAPL", rv_y = True, garch_var_x= False, best_param_x=False, best_param_y=False)
y_additional_test_data = additional_test_data["y"]
check_data_issues(y_additional_test_data, "AAPL additional test data")
Checking AAPL additional test data:
Shape: (2018, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  18807.929648208163
  Min value:  0.250100463008116

Model evaluation metrics¶

Helper functions¶

Some helper function that can compute relevant metrics¶

In [93]:
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

def evaluate_predictions(
    y_true,
    y_pred,
    horizons=(1, 3, 5, 10, 20, -1),
    epsilon=1e-12,
    qlike_mode="ratio",
    qlike_floor="auto",
    qlike_calibrate=True
):
    """Compute MAE / RMSE / R2 / Pearson r / QLIKE over several forecast horizons.

    Parameters
    ----------
    y_true, y_pred : np.ndarray
        2D arrays of shape (n_samples, horizon_len); must match.
    horizons : iterable of int
        Prefix lengths (in days) to evaluate; any non-positive value means
        "use the full horizon".  Default changed from a mutable list to an
        immutable tuple to avoid the shared-mutable-default pitfall
        (behavior is otherwise identical).
    epsilon : float
        Numerical floor used in denominators and clipping.
    qlike_mode : {"ratio", "log"}
        Variant of the QLIKE loss to compute.
    qlike_floor : "auto" or float
        Lower clip applied before QLIKE; "auto" derives it from y_true.
    qlike_calibrate : bool
        If True, rescale predictions so mean(y_true / y_pred) == 1 before QLIKE.

    Returns
    -------
    dict
        Maps "<horizon label> <metric>" -> float (NaN when a horizon has no
        finite (y_true, y_pred) pairs).
    """
    results = {}
    assert y_true.ndim == 2 and y_pred.ndim == 2
    assert y_true.shape == y_pred.shape

    T = y_true.shape[1]

    def _auto_floor(arr):
        # Floor = 1e-6 * (5th percentile of the positive finite values),
        # but never below epsilon.  Falls back to epsilon when no positive
        # finite values exist.
        arr = np.asarray(arr, dtype=np.float64)
        pos = arr[np.isfinite(arr) & (arr > 0)]
        if pos.size == 0:
            return float(epsilon)
        base = np.percentile(pos, 5.0)
        return float(max(epsilon, 1e-6 * base))

    for h in horizons:
        label = f"{h} day(s)" if h > 0 else "full horizon"
        h_slice = h if h > 0 else T
        if h_slice > T:
            continue  # requested horizon longer than available data: skip silently

        yt = y_true[:, :h_slice].astype(np.float64).ravel()
        yp = y_pred[:, :h_slice].astype(np.float64).ravel()

        # Keep only pairs where both sides are finite.
        mask = np.isfinite(yt) & np.isfinite(yp)
        yt = yt[mask]
        yp = yp[mask]

        if yt.size == 0:
            # No usable data for this horizon -> report NaNs for every metric.
            results[f"{label} MAE"]        = np.nan
            results[f"{label} RMSE"]       = np.nan
            results[f"{label} R2"]         = np.nan
            results[f"{label} Pearson r"]  = np.nan
            results[f"{label} QLIKE"]      = np.nan
            continue

        mae  = float(mean_absolute_error(yt, yp))
        rmse = float(np.sqrt(mean_squared_error(yt, yp)))
        r2   = float(r2_score(yt, yp))

        # Pearson correlation computed manually; NaN when either side is
        # (numerically) constant.
        ytm = yt - yt.mean()
        ypm = yp - yp.mean()
        denom = np.sqrt((ytm**2).mean() * (ypm**2).mean())
        pearson_r = float((ytm*ypm).mean() / denom) if denom > epsilon else np.nan

        if qlike_floor == "auto":
            floor = _auto_floor(yt)
        else:
            floor = max(float(qlike_floor), float(epsilon))

        # QLIKE requires strictly positive values; clip both sides.
        yt_pos = np.clip(yt, floor, None)
        yp_pos = np.clip(yp, floor, None)

        if qlike_calibrate:
            # Multiplicative calibration so mean(yt/yp) == 1.
            c = float(np.mean(yt_pos / yp_pos))
            yp_pos = np.clip(yp_pos * c, floor, None)

        if qlike_mode == "ratio":
            # Zero-minimized form: r - log(r) - 1 >= 0, equal to 0 iff r == 1.
            r = yt_pos / yp_pos
            qlike = float(np.mean(r - np.log(r) - 1.0))
        elif qlike_mode == "log":
            qlike = float(np.mean(np.log(yp_pos) + (yt_pos / yp_pos)))
        else:
            raise ValueError("qlike_mode must be 'ratio' or 'log'")

        results[f"{label} MAE"]        = mae
        results[f"{label} RMSE"]       = rmse
        results[f"{label} R2"]         = r2
        results[f"{label} Pearson r"]  = pearson_r
        results[f"{label} QLIKE"]      = qlike

    return results

Import external libraries¶

In [94]:
import sys

# Locate the vendored Time-Series-Library repo under the external-libraries
# folder and make it importable.
external_libraries_relative_path = 'External_libraries'
time_series_library_relative_path = 'Time-Series-Library'

external_libraries_path = os.path.join(root_folder, external_libraries_relative_path)
time_series_library_path = os.path.join(external_libraries_path, time_series_library_relative_path)

# Fixed label typo ("Times-" -> "Time-") to match the actual repo name.
print("Time-Series-Library repo path:", time_series_library_path)

# Guard against duplicate sys.path entries when this cell is re-run
# in the same kernel session.
if time_series_library_path not in sys.path:
    sys.path.append(time_series_library_path)
Times-Series-Library repo path: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/External_libraries/Time-Series-Library

Install some dependencies¶

In [95]:
!pip install reformer-pytorch
Requirement already satisfied: reformer-pytorch in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (1.4.4)
Requirement already satisfied: axial-positional-embedding>=0.1.0 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from reformer-pytorch) (0.3.12)
Requirement already satisfied: einops in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from reformer-pytorch) (0.8.1)
Requirement already satisfied: local-attention in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from reformer-pytorch) (1.11.2)
Requirement already satisfied: product-key-memory in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from reformer-pytorch) (0.2.11)
Requirement already satisfied: torch in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from reformer-pytorch) (2.8.0)
Requirement already satisfied: filelock in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from torch->reformer-pytorch) (3.19.1)
Requirement already satisfied: typing-extensions>=4.10.0 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from torch->reformer-pytorch) (4.12.2)
Requirement already satisfied: sympy>=1.13.3 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from torch->reformer-pytorch) (1.14.0)
Requirement already satisfied: networkx in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from torch->reformer-pytorch) (3.4.2)
Requirement already satisfied: jinja2 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from torch->reformer-pytorch) (3.1.6)
Requirement already satisfied: fsspec in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from torch->reformer-pytorch) (2025.7.0)
Requirement already satisfied: mpmath<1.4,>=1.1.0 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from sympy>=1.13.3->torch->reformer-pytorch) (1.3.0)
Requirement already satisfied: MarkupSafe>=2.0 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from jinja2->torch->reformer-pytorch) (3.0.2)
Requirement already satisfied: hyper-connections>=0.1.8 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from local-attention->reformer-pytorch) (0.2.1)
Requirement already satisfied: colt5-attention>=0.10.14 in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from product-key-memory->reformer-pytorch) (0.11.1)
Requirement already satisfied: packaging in /opt/anaconda3/envs/NN_env/lib/python3.10/site-packages (from colt5-attention>=0.10.14->product-key-memory->reformer-pytorch) (25.0)

Some helper functions to process 3D tensors¶

In [96]:
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import numpy as np

def combine_3D_inputs(X1, X2):
    """Join two (batch, time, features) tensors along the feature axis."""
    return np.concatenate((X1, X2), axis=-1)


def scale_3d_array(array, type="standard"):
    """Fit a scaler per feature on a 3D tensor and return (scaled_tensor, scaler).

    The tensor is flattened to (batch * time, features) so the scaler learns
    one set of statistics per feature, then reshaped back.
    """
    if array.ndim != 3:
        raise ValueError("Input array must be 3D (batch, time, features)")
    batch, steps, n_feat = array.shape
    flat = array.reshape(-1, n_feat)

    if type == "min_max":
        fitted_scaler = MinMaxScaler(clip=False)
    elif type == "standard":
        fitted_scaler = StandardScaler()
    else:
        raise ValueError("Unknown scaling type. Use 'min_max' or 'standard'.")

    scaled_flat = fitted_scaler.fit_transform(flat)
    return scaled_flat.reshape(batch, steps, n_feat), fitted_scaler

def apply_scaler_to_3d_array(array, scaler):
    """Transform a 3D tensor with an already-fitted scaler; None passes through."""
    if array is None:
        return None
    batch, steps, n_feat = array.shape
    transformed = scaler.transform(array.reshape(-1, n_feat))
    return transformed.reshape(batch, steps, n_feat)

def inverse_scale_3d(array, scaler):
    """Undo a fitted scaler's transform on a 3D tensor; None passes through."""
    if array is None:
        return None
    batch, steps, n_feat = array.shape
    restored = scaler.inverse_transform(array.reshape(-1, n_feat))
    return restored.reshape(batch, steps, n_feat)



def flatten_3d_tensor_by_feature_first(array):
    """Flatten (batch, time, features) to (batch, features * time).

    All time steps of feature 0 come first, then feature 1, etc.
    """
    if array.ndim != 3:
        raise ValueError("Expected a 3D array of shape (batch, time, features)")
    batch = array.shape[0]
    return np.swapaxes(array, 1, 2).reshape(batch, -1)


def flatten_3d_tensor_standard(array):
    """Flatten (batch, time, features) to (batch, time * features), time-major."""
    if array.ndim != 3:
        raise ValueError("Expected a 3D array of shape (batch_size, time_steps, features)")
    batch = array.shape[0]
    return array.reshape(batch, -1)


def unflatten_3d_tensor_standard(array, time_steps, features):
    """Inverse of flatten_3d_tensor_standard: (batch, T*F) -> (batch, T, F)."""
    if array.ndim != 2:
        raise ValueError("Expected a 2D array (batch_size, time_steps * features)")

    n_rows = array.shape[0]
    expected_dim = time_steps * features
    # Guard against silently reshaping mismatched data.
    if array.shape[1] != expected_dim:
        raise ValueError(f"Expected second dimension to be {expected_dim}, got {array.shape[1]}")

    return array.reshape(n_rows, time_steps, features)


def unflatten_3d_tensor_by_feature_first(array, time_steps, features):
    """Inverse of flatten_3d_tensor_by_feature_first: (batch, F*T) -> (batch, T, F)."""
    if array.ndim != 2:
        raise ValueError("Expected a 2D array (batch_size, features * time_steps)")

    n_rows = array.shape[0]
    expected_dim = features * time_steps
    # Guard against silently reshaping mismatched data.
    if array.shape[1] != expected_dim:
        raise ValueError(f"Expected second dimension to be {expected_dim}, got {array.shape[1]}")

    # First recover the (batch, features, time) layout, then swap back.
    by_feature = array.reshape(n_rows, features, time_steps)
    return np.swapaxes(by_feature, 1, 2)

Some helper functions to sanity-check the metrics computed with the Keras backbone¶

In [97]:
import numpy as np

def probe_scaled_and_unscaled(model, X_test, y_test_scaled, scaler_y, task_idx=None):
    """Cross-check scaled vs. unscaled error metrics for a fitted model.

    Predicts on X_test, computes RMSE/MAE in scaled space, derives the
    *expected* unscaled errors analytically from the scaler parameters
    (the additive shift cancels in differences, so only the multiplicative
    scale matters), and compares them with errors computed after an actual
    inverse transform.  The two unscaled values should agree up to
    floating-point error — a mismatch indicates a scaling bug.

    Fixes vs. previous version: removed unused ``shifts`` locals in the
    all-features branch and deduplicated the identical ``pu``/``tu`` lines
    across the two scaler branches (behavior unchanged).

    Parameters
    ----------
    model : estimator exposing ``.predict`` (or a wrapper exposing ``.model_``).
    X_test : inputs forwarded unchanged to the model's predict.
    y_test_scaled : array of shape (N, H) or (N, H, F), targets in scaled space.
    scaler_y : fitted MinMaxScaler- or StandardScaler-like object.
    task_idx : optional int — restrict the probe to a single output feature.

    Returns
    -------
    dict with scaled, expected-unscaled, and actual-unscaled RMSE/MAE.
    """
    # Prefer the underlying backbone (model_) when the wrapper exposes one;
    # otherwise fall back to the wrapper's own predict.
    pred_scaled = model.model_.predict(X_test, verbose=0) if hasattr(model, "model_") and model.model_ is not None else model.predict(X_test)

    # Promote (N, H) to (N, H, 1) so both branches work on 3D tensors.
    if pred_scaled.ndim == 2:  # (N,H)
        pred_scaled = pred_scaled[..., None]
    if y_test_scaled.ndim == 2:
        y_test_scaled = y_test_scaled[..., None]

    N, H, F = y_test_scaled.shape
    assert pred_scaled.shape == (N, H, F), f"Shape mismatch: pred {pred_scaled.shape} vs true {y_test_scaled.shape}"

    if task_idx is not None:
        # --- single-feature probe -------------------------------------
        ps = pred_scaled[..., task_idx:task_idx+1]
        ys = y_test_scaled[..., task_idx:task_idx+1]

        mse_s  = np.mean((ps - ys) ** 2)
        rmse_s = float(np.sqrt(mse_s))
        mae_s  = float(np.mean(np.abs(ps - ys)))

        # Recover this feature's affine transform: x_unscaled = x * scale + shift.
        if hasattr(scaler_y, "data_range_"):   # MinMaxScaler
            scale = float(scaler_y.data_range_[task_idx])
            shift = float(scaler_y.data_min_[task_idx])
        elif hasattr(scaler_y, "scale_"):      # StandardScaler
            scale = float(scaler_y.scale_[task_idx])
            shift = float(scaler_y.mean_[task_idx])
        else:
            raise ValueError("Unsupported scaler type; expected MinMaxScaler or StandardScaler.")

        pu = ps * scale + shift
        tu = ys * scale + shift
        # Shift cancels in differences, so errors scale linearly with `scale`.
        rmse_u_exp = rmse_s * scale
        mae_u_exp  = mae_s  * scale

        rmse_u = float(np.sqrt(np.mean((pu - tu) ** 2)))
        mae_u  = float(np.mean(np.abs(pu - tu)))

        return {
            "rmse_scaled": rmse_s,
            "mae_scaled": mae_s,
            "rmse_unscaled_expected": rmse_u_exp,
            "mae_unscaled_expected": mae_u_exp,
            "rmse_unscaled": rmse_u,
            "mae_unscaled": mae_u,
        }

    # --- all-features probe -------------------------------------------
    diff_s = pred_scaled - y_test_scaled
    rmse_s = float(np.sqrt(np.mean(diff_s ** 2)))
    mae_s  = float(np.mean(np.abs(diff_s)))

    # Per-feature scales only; the shifts are omitted because they cancel
    # when differencing predictions and targets.
    if hasattr(scaler_y, "data_range_"):
        scales = scaler_y.data_range_.reshape(1, 1, F)
    elif hasattr(scaler_y, "scale_"):
        scales = scaler_y.scale_.reshape(1, 1, F)
    else:
        raise ValueError("Unsupported scaler type; expected MinMaxScaler or StandardScaler.")
    diff_u_exp = diff_s * scales

    rmse_u_exp = float(np.sqrt(np.mean(diff_u_exp ** 2)))
    mae_u_exp  = float(np.mean(np.abs(diff_u_exp)))

    # Ground truth: run the actual inverse transform and recompute the errors.
    pred_u = scaler_y.inverse_transform(pred_scaled.reshape(-1, F)).reshape(N, H, F)
    true_u = scaler_y.inverse_transform(y_test_scaled.reshape(-1, F)).reshape(N, H, F)

    rmse_u = float(np.sqrt(np.mean((pred_u - true_u) ** 2)))
    mae_u  = float(np.mean(np.abs(pred_u - true_u)))

    return {
        "rmse_scaled": rmse_s,
        "mae_scaled": mae_s,
        "rmse_unscaled_expected": rmse_u_exp,
        "mae_unscaled_expected": mae_u_exp,
        "rmse_unscaled": rmse_u,
        "mae_unscaled": mae_u,
    }

Main function for training and evaluating models¶

We had to separate the "actual features" X from the "time" X because of the scaling logic.

In [98]:
import pickle
import os
import numpy as np
from sklearn.model_selection import KFold, TimeSeriesSplit
from sklearn.metrics import mean_squared_error
from sklearn.base import clone
from sklearn.model_selection import ParameterGrid
from scipy.stats import t
import torch
import random

def train_and_evaluate_model(
    X_price,
    X_time,
    y,
    model_type,
    *,
    target_mode="log_var_ratio",
    baseline_feature_idx=0,
    baseline_window=10,
    baseline_eps=1e-12,
    merge_price_time=True,
    use_nested_cv=False,
    outer_folds=5,
    inner_folds=3,
    param_grid=None,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    random_state=42,
    save_model_path=None,
    checkpoint_path=None,
    verbose=True,
    epochs=50,
    batch_size=32,
    lr=3e-5,
    flatten=True,
    no_tasks=1,
    trainable_parts=False,
    hidden_layers=3,
    time_horizon=1,
    single_holdout=False,
    val_frac=0.10,
    debug_scaling=False,
    dropout=0.00,
    l2_weight=0.00,
    patience=10,
    min_epochs=50,
    min_delta=1e-4,
    hidden_dim=16,
    knots=8,
    spline_power=3,
    warmup_aux_epochs=15,
    joint_epochs=15,
    d_model=64,
    d_ff=254,
    n_heads=4,
    e_layers=2,
    y_scale_type="standard",
    cv_strategy="time",
    metric_for_early_stop="mse_model",
    esv_frac=None,
    deterministic_torch=False,
    final_refit_holdout_frac=0.20,
):
    def _reset_seeds(seed, deterministic=False):
        random.seed(seed); np.random.seed(seed); torch.manual_seed(seed)
        if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
        if deterministic:
            try:
                torch.use_deterministic_algorithms(True)
                torch.backends.cudnn.benchmark = False
            except Exception:
                pass

    def _cv_splitter(kind, n_splits):
        if kind == "time":
            return TimeSeriesSplit(n_splits=n_splits)
        return KFold(n_splits=n_splits, shuffle=False)

    def has_sklearn_api(m):
        return hasattr(m, "get_params") and hasattr(m, "set_params")

    def safe_clone(m):
        if has_sklearn_api(m):
            try:
                return clone(m)
            except Exception:
                pass
        params = m.get_params() if hasattr(m, "get_params") else {}
        return m.__class__(**params)

    assert len(y.shape) == 3, "y must be a 3D array (batch, time, features)"
    y = y[:, 0:time_horizon, :]
    if no_tasks < y.shape[2]:
        y = y[:, :, :no_tasks]

    if merge_price_time and normalize_X and not normalize_Time:
        if verbose:
            print("merge_price_time=True with normalize_X=True but normalize_Time=False -> enabling normalize_Time=True for scale alignment.")
        normalize_Time = True

    def _resolve_mode_and_loss(target_mode, normalize_y):
        if target_mode == "log_var_ratio":
            loss_type = "mse"; target_is_logvar = False
        elif target_mode == "log":
            loss_type = "gauss_nll_var"; target_is_logvar = True
            if normalize_y:
                print("normalize_y=True with NLL(log) → forcing normalize_y=False.")
                normalize_y = False
        elif target_mode == "identity":
            loss_type = "mse"; target_is_logvar = False
        elif target_mode == "log_mse":
            loss_type = "mse"; target_is_logvar = True
        else:
            raise ValueError(f"Unknown target_mode: {target_mode}")
        return loss_type, target_is_logvar, normalize_y

    loss_type, target_is_logvar, normalize_y = _resolve_mode_and_loss(target_mode, normalize_y)
    print(f"[mode={target_mode}] loss_type={loss_type}, target_is_logvar={target_is_logvar}, normalize_y={normalize_y}")

    EPS_MIN = 1e-12
    FLOOR_Y = max(EPS_MIN, baseline_eps)
    esv_frac = val_frac if esv_frac is None else esv_frac

    def _build_baseline_var_from_Xp(Xp, y_like, feat_idx=0, window=20, eps=1e-12):
        B, Ty, Fy = y_like.shape
        if Xp.shape[1] < window:
            raise ValueError(f"X_price time_steps={Xp.shape[1]} < baseline_window={window}")
        x = Xp[:, -window:, feat_idx]
        rv = np.mean(np.square(x), axis=1)
        rv = np.clip(rv, eps, None)
        return np.tile(rv[:, None, None], (1, Ty, Fy))

    def _transform_target(y_raw, base3d=None):
        if target_mode == "log_var_ratio":
            if base3d is None:
                raise ValueError("log_var_ratio requires a baseline (base3d).")
            y_log = np.log(np.clip(y_raw, FLOOR_Y, None))
            b_log = np.log(np.clip(base3d, FLOOR_Y, None))
            return (y_log - b_log), {"mode": "log_var_ratio", "base": base3d}
        elif target_mode in ("log", "log_mse"):
            return np.log(np.clip(y_raw, FLOOR_Y, None)), {"mode": "log"}
        elif target_mode == "identity":
            return y_raw, {"mode": "identity"}
        else:
            raise ValueError(f"Unknown target_mode: {target_mode}")

    def _inverse_transform_target(y_model_space, ctx):
        m = ctx["mode"]
        if m == "log_var_ratio":
            return ctx["base"] * np.exp(y_model_space)
        elif m == "log":
            return np.exp(y_model_space)
        elif m == "identity":
            return y_model_space
        else:
            raise ValueError(f"Unknown inverse mode: {m}")

    def _fit_apply_scalers(Xp_core, Xt_core, y_core,
                           Xp_val, Xt_val, y_val,
                           Xp_test, Xt_test, y_test,
                           do_norm_X=True, do_norm_Time=False, do_norm_y=True,
                           y_scaler_kind="standard"):
        X_price_scaler = None; X_time_scaler = None; y_scaler = None
        if do_norm_X:
            _, X_price_scaler = scale_3d_array(Xp_core, type="standard")
            Xp_core_s = apply_scaler_to_3d_array(Xp_core, X_price_scaler)
            Xp_val_s  = apply_scaler_to_3d_array(Xp_val,  X_price_scaler)
            Xp_test_s = apply_scaler_to_3d_array(Xp_test, X_price_scaler)
        else:
            Xp_core_s, Xp_val_s, Xp_test_s = Xp_core, Xp_val, Xp_test
        if do_norm_Time and Xt_core is not None:
            _, X_time_scaler = scale_3d_array(Xt_core, type="standard")
            Xt_core_s = apply_scaler_to_3d_array(Xt_core, X_time_scaler)
            Xt_val_s  = apply_scaler_to_3d_array(Xt_val,  X_time_scaler)
            Xt_test_s = apply_scaler_to_3d_array(Xt_test, X_time_scaler)
        else:
            Xt_core_s, Xt_val_s, Xt_test_s = Xt_core, Xt_val, Xt_test
        if do_norm_y:
            _, y_scaler = scale_3d_array(y_core, type=y_scaler_kind)
            y_core_s = apply_scaler_to_3d_array(y_core, y_scaler)
            y_val_s  = apply_scaler_to_3d_array(y_val,  y_scaler)
            y_test_s = apply_scaler_to_3d_array(y_test, y_scaler)
        else:
            y_core_s, y_val_s, y_test_s = y_core, y_val, y_test
        return (Xp_core_s, Xt_core_s, y_core_s,
                Xp_val_s,  Xt_val_s,  y_val_s,
                Xp_test_s, Xt_test_s, y_test_s,
                X_price_scaler, X_time_scaler, y_scaler)

    def _prep_for_model(Xp, Xt, y_, merge_PT, do_flatten):
        X = combine_3D_inputs(Xp, Xt) if (merge_PT and Xt is not None) else Xp
        if do_flatten:
            X = flatten_3d_tensor_standard(X)
            y_ = flatten_3d_tensor_by_feature_first(y_)
        return X, y_

    def _build_task0_true_pred(y_true_3d, y_pred_3d):
        return np.stack([y_true_3d[:, :, 0], y_pred_3d[:, :, 0]], axis=-1)

    def _peek_mag(name, arr):
        if not debug_scaling or arr is None: return
        r = arr.reshape(-1, arr.shape[-1]); top5 = np.sort(np.abs(r).max(axis=0))[-5:]
        print(f"{name}: max|x| per feature (top 5): {top5}")

    def _flat_dim(x3d): 
        return 0 if x3d is None else (x3d.shape[1] * x3d.shape[2])
    def _feat_dim(x3d):
        return 0 if x3d is None else x3d.shape[2]

    if model_type == "Simple_MLP":
        input_dimension = (X_price.shape[1] * X_price.shape[2] + (_flat_dim(X_time) if merge_price_time else 0))
        model = SimpleMLPWrapper(input_dim=input_dimension,
                                 output_dim=y.shape[1] * y.shape[2],
                                 lr=lr, epochs=epochs, batch_size=batch_size,
                                 verbose=verbose, hidden_layers=hidden_layers, no_tasks=no_tasks,
                                 dropout=dropout, l2_weight=l2_weight,
                                 patience=patience, min_epochs=min_epochs, min_delta=min_delta,
                                 hidden_dim=hidden_dim, loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "Simple_LSTM":
        input_dimension = X_price.shape[2] + ((_feat_dim(X_time)) if merge_price_time else 0)
        model = SimpleLSTMWrapper(input_dim=input_dimension, epochs=epochs,
                                  verbose=verbose, batch_size=batch_size, lr=lr,
                                  no_tasks=no_tasks, hidden_layers=hidden_layers,
                                  output_dim=y.shape[2], pred_len=y.shape[1],
                                  dropout=dropout, l2_weight=l2_weight,
                                  patience=patience, min_epochs=min_epochs, min_delta=min_delta,
                                  hidden_dim=hidden_dim, loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "ITransformer":
        input_dimension = X_price.shape[2] + ((_feat_dim(X_time)) if merge_price_time else 0)
        model = ITransformerWrapper(input_len=X_price.shape[1], input_dim=input_dimension,
                                    output_len=y.shape[1], output_dim=y.shape[2],
                                    epochs=epochs, batch_size=batch_size, lr=lr,
                                    verbose=verbose, no_tasks=no_tasks, dropout=dropout,
                                    l2_weight=l2_weight, patience=patience,
                                    min_epochs=min_epochs, min_delta=min_delta, hidden_dim=hidden_dim,
                                    hidden_layers=hidden_layers,
                                    d_model=d_model, d_ff=d_ff, n_heads=n_heads, e_layers=e_layers,
                                    loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "TimesNet":
        input_dimension = X_price.shape[2] + ((_feat_dim(X_time)) if merge_price_time else 0)
        model = TimesNetWrapper(input_len=X_price.shape[1], input_dim=input_dimension,
                                output_len=y.shape[1], output_dim=y.shape[2],
                                epochs=epochs, batch_size=batch_size, lr=lr,
                                verbose=verbose, no_tasks=no_tasks, dropout=dropout,
                                l2_weight=l2_weight, patience=patience,
                                min_epochs=min_epochs, min_delta=min_delta, hidden_dim=hidden_dim,
                                hidden_layers=hidden_layers,
                                d_model=d_model, d_ff=d_ff, n_heads=n_heads, e_layers=e_layers,
                                loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "Custom_KAN":
        input_dimension = (X_price.shape[1] * X_price.shape[2] + (_flat_dim(X_time) if merge_price_time else 0))
        model = TorchHierarchicalKANWrapper(input_dim=input_dimension,
                                            output_dim=y.shape[1] * y.shape[2],
                                            lr=lr, epochs=epochs, batch_size=batch_size,
                                            verbose=verbose, no_tasks=no_tasks,
                                            knots=8, spline_power=3, dropout=dropout, l2_weight=l2_weight,
                                            patience=patience, min_epochs=min_epochs, min_delta=min_delta,
                                            hidden_dim=hidden_dim, warmup_aux_epochs=warmup_aux_epochs,
                                            joint_epochs=joint_epochs, hidden_layers=hidden_layers,
                                            loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "Simple_KAN":
        input_dimension = (X_price.shape[1] * X_price.shape[2] + (_flat_dim(X_time) if merge_price_time else 0))
        model = SimpleKANWrapper(input_dim=input_dimension,
                                 output_dim=y.shape[1] * y.shape[2],
                                 lr=lr, epochs=epochs, batch_size=batch_size,
                                 verbose=verbose, hidden_layers=hidden_layers,
                                 no_tasks=no_tasks, knots=knots, spline_power=spline_power,
                                 dropout=dropout, l2_weight=l2_weight,
                                 patience=patience, min_epochs=min_epochs, min_delta=min_delta,
                                 hidden_dim=hidden_dim, loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "Custom_KAN_LSTM":
        input_dimension = (X_price.shape[1] * X_price.shape[2] + (_flat_dim(X_time) if merge_price_time else 0))
        model = TKANHierMTLWrapper(input_dim=input_dimension,
                                   output_dim=y.shape[2],
                                   pred_len=y.shape[1], 
                                   lr=lr, epochs=epochs, batch_size=batch_size,
                                   verbose=verbose, hidden_layers=hidden_layers,
                                   no_tasks=no_tasks, knots=knots, spline_power=spline_power,
                                   dropout=dropout, l2_weight=l2_weight,
                                   patience=patience, min_epochs=min_epochs, min_delta=min_delta,
                                   hidden_dim=hidden_dim, loss_type=loss_type, target_is_logvar=target_is_logvar)
    elif model_type == "LSTM_KAN":
        input_dimension = X_price.shape[2] + ((_feat_dim(X_time)) if merge_price_time else 0)
        model = TKANSeq2SeqWrapper(input_dim=input_dimension, epochs=epochs,
                                   verbose=verbose, batch_size=batch_size, lr=lr,
                                   no_tasks=no_tasks, hidden_layers=hidden_layers,
                                   output_dim=y.shape[2], pred_len=y.shape[1],
                                   knots=8, spline_power=3, dropout=dropout, l2_weight=l2_weight,
                                   patience=patience, min_epochs=min_epochs, min_delta=min_delta,
                                   hidden_dim=hidden_dim, loss_type=loss_type, target_is_logvar=target_is_logvar)
    else:
        raise ValueError("Model type not defined")

    results = {}; nested_results = {}
    best_model_final = None; best_params_final = None
    X_price_train_scaler = None; X_time_train_scaler = None
    scaler_y = None; y_task0_true_pred = None

    _reset_seeds(random_state, deterministic=deterministic_torch)

    batch_size_y, time_steps_y, num_features_y = y.shape
    print("Batch size for y:", batch_size_y)
    print("Time steps for y:", time_steps_y)
    print("Features for y:", num_features_y)

    def _make_train_val_from_train(Xp_tr_, Xt_tr_, y_tr_, frac=0.10):
        n = len(Xp_tr_); v = max(1, int(round(frac * n)))
        return (Xp_tr_[:-v], (Xt_tr_[:-v] if Xt_tr_ is not None else None), y_tr_[:-v],
                Xp_tr_[-v:],  (Xt_tr_[-v:]  if Xt_tr_ is not None else None), y_tr_[-v:])

    def _score_block(y_pred_block, y_true_block, ctx_block, scaler_block, is_scaled, metric):
        """Compute the model-selection / early-stopping score for one validation block.

        Parameters
        ----------
        y_pred_block, y_true_block :
            Predictions/targets as produced by the model — flattened 2D when the
            outer `flatten` flag is set, otherwise 3D (batch, time, feature).
        ctx_block :
            Context passed to `_inverse_transform_target` to undo the target
            transform (only used by the "mse_model" metric).
        scaler_block :
            Fitted y-scaler, applied by `inverse_scale_3d` when `is_scaled` is True.
        is_scaled : bool
            Whether the block is still in scaler space (i.e. `normalize_y` was on).
        metric : str
            One of "nll", "mse_scaled", "mse_model".

        Returns
        -------
        float — lower is better. Raises ValueError for unknown metrics or an
        invalid metric/target_mode combination.
        """
        # Restore (batch, time, feature) layout if the model consumed flat targets.
        if flatten:
            yp3 = unflatten_3d_tensor_by_feature_first(y_pred_block, time_steps_y, num_features_y)
            yt3 = unflatten_3d_tensor_by_feature_first(y_true_block, time_steps_y, num_features_y)
        else:
            yp3 = y_pred_block; yt3 = y_true_block
        if metric == "nll":
            # Gaussian NLL up to additive/multiplicative constants, with the
            # prediction z read as a log-variance and the true value recovered
            # as v = exp(target) — hence the target_mode == "log" requirement.
            if target_mode != "log":
                raise ValueError("metric_for_early_stop='nll' requires target_mode='log'")
            z = yp3; v = np.exp(yt3)
            with np.errstate(over='ignore'):  # exp(-z) can overflow for very negative z
                nll = 0.5*(v*np.exp(-z)+z)
            return float(np.mean(nll))
        if metric == "mse_scaled":
            # MSE directly in the (possibly scaled) model target space.
            return mean_squared_error(yt3.reshape(-1), yp3.reshape(-1))
        if metric == "mse_model":
            # MSE after undoing y-scaling and the target transform (original units).
            if is_scaled:
                yp_m = inverse_scale_3d(yp3, scaler_block)
                yt_m = inverse_scale_3d(yt3, scaler_block)
            else:
                yp_m = yp3; yt_m = yt3
            yp_inv = _inverse_transform_target(yp_m, ctx_block)
            yt_inv = _inverse_transform_target(yt_m, ctx_block)
            return mean_squared_error(yt_inv.reshape(-1), yp_inv.reshape(-1))
        raise ValueError(f"Unknown metric_for_early_stop: {metric}")

    if not use_nested_cv and not single_holdout:
        print("\nStarting training without CV:")
        split_idx = int(0.8 * len(X_price))
        Xp_tr, Xp_te = X_price[:split_idx], X_price[split_idx:]
        Xt_tr, Xt_te = (X_time[:split_idx], X_time[split_idx:]) if X_time is not None else (None, None)
        y_tr_raw, y_te_raw  = y[:split_idx], y[split_idx:]

        Xp_core, Xt_core, y_core_raw, Xp_val, Xt_val, y_val_raw = _make_train_val_from_train(Xp_tr, Xt_tr, y_tr_raw, frac=val_frac)

        base_core = base_val = base_te = None
        if target_mode == "log_var_ratio":
            base_core = _build_baseline_var_from_Xp(Xp_core, y_core_raw, feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_val  = _build_baseline_var_from_Xp(Xp_val,  y_val_raw,  feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_te   = _build_baseline_var_from_Xp(Xp_te,   y_te_raw,   feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)

        y_core, ctx_core = _transform_target(y_core_raw, base_core)
        y_val,  ctx_val  = _transform_target(y_val_raw,  base_val)
        y_te,   ctx_te   = _transform_target(y_te_raw,   base_te)

        (Xp_core_s, Xt_core_s, y_core_s,
         Xp_val_s, Xt_val_s, y_val_s,
         Xp_te_s, Xt_te_s, y_te_s,
         X_price_train_scaler, X_time_train_scaler, scaler_y) = _fit_apply_scalers(
            Xp_core, Xt_core, y_core, Xp_val, Xt_val, y_val, Xp_te, Xt_te, y_te,
            do_norm_X=normalize_X, do_norm_Time=normalize_Time, do_norm_y=normalize_y,
            y_scaler_kind=y_scale_type
        )

        if verbose:
            _peek_mag("Xp_core_s", Xp_core_s); _peek_mag("Xp_val_s",  Xp_val_s)
            if Xt_core_s is not None: _peek_mag("Xt_core_s", Xt_core_s)
            if Xt_val_s  is not None: _peek_mag("Xt_val_s",  Xt_val_s)
            check_data_issues(Xp_core_s, label="X_price_train_core")
            if Xt_core_s is not None: check_data_issues(Xt_core_s, label="X_time_train_core")
            check_data_issues(y_core_s, label=f"y_train_core ({target_mode} scaled)")
            check_data_issues(Xp_val_s, label="X_price_val")
            if Xt_val_s is not None: check_data_issues(Xt_val_s, label="X_time_val")
            check_data_issues(y_val_s, label=f"y_val ({target_mode} scaled)")
            check_data_issues(Xp_te_s, label="X_price_test")
            if Xt_te_s is not None: check_data_issues(Xt_te_s, label="X_time_test")
            check_data_issues(y_te_s, label=f"y_test ({target_mode} scaled)")

        X_train, y_train = _prep_for_model(Xp_core_s, Xt_core_s, y_core_s, merge_price_time, flatten)
        X_val__, y_val__ = _prep_for_model(Xp_val_s, Xt_val_s, y_val_s, merge_price_time, flatten)
        X_test, y_test_scaled = _prep_for_model(Xp_te_s, Xt_te_s, y_te_s, merge_price_time, flatten)

        model.fit(X_train, y_train, X_val__, y_val__)

        if hasattr(model, 'get_params'):
            used_params = model.get_params()
            print("\nParameters used in the single-fit model:")
            for k, v in used_params.items():
                print(f"{k}: {v:.8f}" if isinstance(v, float) else f"{k}: {v}")

        y_pred = model.predict(X_test)

        if flatten:
            y_pred_3d = unflatten_3d_tensor_by_feature_first(y_pred, time_steps_y, num_features_y)
            y_test_scaled_3d = unflatten_3d_tensor_by_feature_first(y_test_scaled, time_steps_y, num_features_y)
        else:
            y_pred_3d = y_pred; y_test_scaled_3d = y_test_scaled

        if normalize_y:
            y_pred_model_space = inverse_scale_3d(y_pred_3d, scaler_y)
            y_true_model_space = inverse_scale_3d(y_test_scaled_3d, scaler_y)
        else:
            y_pred_model_space = y_pred_3d
            y_true_model_space = y_test_scaled_3d

        y_pred_inv = _inverse_transform_target(y_pred_model_space, ctx_te)
        y_true_inv = _inverse_transform_target(y_true_model_space, ctx_te)

        results.clear()
        for i in range(no_tasks):
            results[f"Task {i+1}"] = evaluate_predictions(y_true_inv[:, :, i], y_pred_inv[:, :, i])

        if verbose:
            print("\nResults for regular train/test evaluation (without CV):")
            for task_name, task_result in results.items():
                print(f"\n--- {task_name} ---")
                for metric_name, metric_value in task_result.items():
                    print(f"{metric_name:<35}: {metric_value:.8f}" if isinstance(metric_value, float) else f"{metric_name:<35}: {metric_value}")

        best_model_final = model
        best_params_final = model.get_params() if hasattr(model, 'get_params') else None
        y_task0_true_pred = _build_task0_true_pred(y_true_inv, y_pred_inv)

        if save_model_path and best_model_final is not None:
            os.makedirs(os.path.dirname(save_model_path), exist_ok=True)
            save_dict = {
                "model": best_model_final,
                "results": results,
                "nested_results": nested_results,
                "best_params": best_params_final,
                "X_price_scaler": X_price_train_scaler if normalize_X else None,
                "X_time_scaler": X_time_train_scaler if normalize_Time else None,
                "y_scaler": scaler_y if normalize_y else None,
                "target_mode": target_mode,
                "baseline_feature_idx": baseline_feature_idx,
                "baseline_window": baseline_window,
            }
            with open(save_model_path, "wb") as f:
                pickle.dump(save_dict, f)
            print(f"\nBest model (no-CV) saved to: {save_model_path}")

        print(f"\nSaved y_true min={np.nanmin(y_task0_true_pred[:,:,0]):.6g}, max={np.nanmax(y_task0_true_pred[:,:,0]):.6g}")
        print(f"Saved y_pred min={np.nanmin(y_task0_true_pred[:,:,1]):.6g}, max={np.nanmax(y_task0_true_pred[:,:,1]):.6g}")
        return results, nested_results, best_model_final, best_params_final, y_task0_true_pred

    if single_holdout and not use_nested_cv:
        print("\nSingle holdout: one train/test split, param search on a single train/val (early-stop) split.")
        split_idx = int(0.8 * len(X_price))
        Xp_tr, Xp_te = X_price[:split_idx], X_price[split_idx:]
        Xt_tr, Xt_te = (X_time[:split_idx], X_time[split_idx:]) if X_time is not None else (None, None)
        y_tr_raw,  y_te_raw  = y[:split_idx], y[split_idx:]

        Xp_core, Xt_core, y_core_raw, Xp_val, Xt_val, y_val_raw = _make_train_val_from_train(Xp_tr, Xt_tr, y_tr_raw, frac=val_frac)

        base_core = base_val = base_te = None
        if target_mode == "log_var_ratio":
            base_core = _build_baseline_var_from_Xp(Xp_core, y_core_raw, feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_val  = _build_baseline_var_from_Xp(Xp_val,  y_val_raw,  feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_te   = _build_baseline_var_from_Xp(Xp_te,   y_te_raw,   feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)

        y_core, ctx_core = _transform_target(y_core_raw, base_core)
        y_val,  ctx_val  = _transform_target(y_val_raw,  base_val)
        y_te,   ctx_te   = _transform_target(y_te_raw,   base_te)

        (Xp_core_s, Xt_core_s, y_core_s,
         Xp_val_s, Xt_val_s, y_val_s,
         Xp_te_s, Xt_te_s, y_te_s,
         X_price_train_scaler, X_time_train_scaler, scaler_y) = _fit_apply_scalers(
            Xp_core, Xt_core, y_core, Xp_val, Xt_val, y_val, Xp_te, Xt_te, y_te,
            do_norm_X=normalize_X, do_norm_Time=normalize_Time, do_norm_y=normalize_y,
            y_scaler_kind=y_scale_type
        )

        X_train, y_train = _prep_for_model(Xp_core_s, Xt_core_s, y_core_s, merge_price_time, flatten)
        X_val__, y_val__ = _prep_for_model(Xp_val_s, Xt_val_s, y_val_s, merge_price_time, flatten)

        best_val_loss = float("inf")
        best_grid_params = None

        if param_grid is None or len(param_grid) == 0:
            _reset_seeds(random_state, deterministic=deterministic_torch)
            tmp_model = safe_clone(model)
            tmp_model.fit(X_train, y_train, X_val__, y_val__)
            best_grid_params = getattr(tmp_model, "get_params", lambda: None)()
        else:
            for param_set in ParameterGrid(param_grid):
                _reset_seeds(random_state, deterministic=deterministic_torch)
                model_inner = safe_clone(model)
                if hasattr(model_inner, "set_params") and param_set:
                    model_inner.set_params(**param_set)
                try:
                    model_inner.fit(X_train, y_train, X_val__, y_val__)
                    yv_pred = model_inner.predict(X_val__)
                    val_metric = _score_block(yv_pred, y_val__, ctx_val, scaler_y, normalize_y, metric_for_early_stop)
                    if val_metric < best_val_loss:
                        best_val_loss = val_metric
                        best_grid_params = param_set.copy()
                except Exception as e:
                    if verbose:
                        print(f"Skipped param set {param_set} due to error: {e}")
                    continue

        if best_grid_params and len(best_grid_params) > 0:
            print("\nBest parameters found in single holdout (grid):")
            for k, v in best_grid_params.items():
                print(f"  {k}: {v}")
        else:
            print("\nSingle holdout: no grid provided; proceeding to refit with current model params.")

        print("\nRefitting with selected hyperparameters on (train_core + train_val), new early-stop split from that union…")

        Xp_tr_full = np.concatenate([Xp_core, Xp_val], axis=0)
        Xt_tr_full = (np.concatenate([Xt_core, Xt_val], axis=0) if Xt_core is not None else None)
        y_tr_full  = np.concatenate([y_core,  y_val], axis=0)

        Xp_core_r, Xt_core_r, y_core_r, Xp_esv_r, Xt_esv_r, y_esv_r = _make_train_val_from_train(Xp_tr_full, Xt_tr_full, y_tr_full, frac=esv_frac)

        (Xp_core_sr, Xt_core_sr, y_core_sr,
         Xp_esv_sr, Xt_esv_sr, y_esv_sr,
         Xp_te_sr,  Xt_te_sr,  y_te_sr,
         X_price_train_scaler, X_time_train_scaler, scaler_y) = _fit_apply_scalers(
            Xp_core_r, Xt_core_r, y_core_r,
            Xp_esv_r, Xt_esv_r, y_esv_r,
            Xp_te, Xt_te, y_te,
            do_norm_X=normalize_X, do_norm_Time=normalize_Time, do_norm_y=normalize_y,
            y_scaler_kind=y_scale_type
        )

        X_tr_refit,  y_tr_refit  = _prep_for_model(Xp_core_sr, Xt_core_sr, y_core_sr, merge_price_time, flatten)
        X_esv_refit, y_esv_refit = _prep_for_model(Xp_esv_sr,  Xt_esv_sr,  y_esv_sr,  merge_price_time, flatten)
        X_te_refit,  y_te_scaled = _prep_for_model(Xp_te_sr,   Xt_te_sr,   y_te_sr,   merge_price_time, flatten)

        best_model_refit = safe_clone(model)
        if best_grid_params and hasattr(best_model_refit, "set_params"):
            best_model_refit.set_params(**best_grid_params)

        _reset_seeds(random_state, deterministic=deterministic_torch)
        best_model_refit.fit(X_tr_refit, y_tr_refit, X_esv_refit, y_esv_refit)

        y_pred_refit = best_model_refit.predict(X_te_refit)
        if flatten:
            y_pred_refit_3d = unflatten_3d_tensor_by_feature_first(y_pred_refit, time_steps_y, num_features_y)
            y_test_scaled_3d = unflatten_3d_tensor_by_feature_first(y_te_scaled, time_steps_y, num_features_y)
        else:
            y_pred_refit_3d = y_pred_refit; y_test_scaled_3d = y_te_scaled

        if normalize_y:
            y_pred_model = inverse_scale_3d(y_pred_refit_3d, scaler_y)
            y_true_model = inverse_scale_3d(y_test_scaled_3d, scaler_y)
        else:
            y_pred_model = y_pred_refit_3d; y_true_model = y_test_scaled_3d

        y_pred_inv = _inverse_transform_target(y_pred_model, ctx_te)
        y_true_inv = _inverse_transform_target(y_true_model, ctx_te)

        results.clear()
        for i in range(no_tasks):
            results[f"Task {i+1}"] = evaluate_predictions(y_true_inv[:, :, i], y_pred_inv[:, :, i])

        if verbose:
            print("\nResults for the refit model (single holdout):")
            for task_name, task_result in results.items():
                print(f"\n--- {task_name} ---")
                for metric_name, metric_value in task_result.items():
                    print(f"{metric_name:<35}: {metric_value:.8f}" if isinstance(metric_value, float) else f"{metric_name:<35}: {metric_value}")

        best_model_final = best_model_refit
        best_params_final = best_model_final.get_params() if hasattr(best_model_final, "get_params") else (best_grid_params or {})
        y_task0_true_pred = _build_task0_true_pred(y_true_inv, y_pred_inv)
        print(f"\nSaved y_true min={np.nanmin(y_task0_true_pred[:,:,0]):.6g}, max={np.nanmax(y_task0_true_pred[:,:,0]):.6g}")
        print(f"Saved y_pred min={np.nanmin(y_task0_true_pred[:,:,1]):.6g}, max={np.nanmax(y_task0_true_pred[:,:,1]):.6g}")

        if save_model_path and best_model_final is not None:
            os.makedirs(os.path.dirname(save_model_path), exist_ok=True)
            save_dict = {
                "model": best_model_final,
                "results": results,
                "nested_results": nested_results,
                "best_params": best_params_final,
                "X_price_scaler": X_price_train_scaler if normalize_X else None,
                "X_time_scaler": X_time_train_scaler if normalize_Time else None,
                "y_scaler": scaler_y if normalize_y else None,
                "target_mode": target_mode,
                "baseline_feature_idx": baseline_feature_idx,
                "baseline_window": baseline_window,
            }
            with open(save_model_path, "wb") as f:
                pickle.dump(save_dict, f)
            print(f"\nBest single-holdout (refit) model saved to: {save_model_path}")

        return results, nested_results, best_model_final, best_params_final, y_task0_true_pred

    if verbose:
        print("\nNested cross-validation with grid search:")

    outer_cv = _cv_splitter(cv_strategy, outer_folds)
    nested_scores = {i: [] for i in range(no_tasks)}
    best_score = float("inf")
    best_params_final = None; best_model_final = None

    for fold_idx, (train_idx, test_idx) in enumerate(outer_cv.split(X_price)):
        if verbose:
            print(f"Outer fold {fold_idx + 1}:")
        Xp_tr_full, Xp_te = X_price[train_idx], X_price[test_idx]
        if X_time is not None:
            Xt_tr_full, Xt_te = X_time[train_idx],  X_time[test_idx]
        else:
            Xt_tr_full, Xt_te = None, None
        y_tr_full_raw,  y_te_raw  = y[train_idx], y[test_idx]

        inner_cv = _cv_splitter(cv_strategy, inner_folds)
        best_val_loss = float("inf"); best_params = None

        search_grid = ParameterGrid(param_grid) if (param_grid is not None and len(param_grid) > 0) else [dict()]

        for param_set in search_grid:
            fold_val_losses = []
            for inner_fold_idx, (inner_tr_idx, inner_val_idx) in enumerate(inner_cv.split(Xp_tr_full)):
                Xp_inner_tr, Xp_inner_val = Xp_tr_full[inner_tr_idx], Xp_tr_full[inner_val_idx]
                if Xt_tr_full is not None:
                    Xt_inner_tr, Xt_inner_val = Xt_tr_full[inner_tr_idx], Xt_tr_full[inner_val_idx]
                else:
                    Xt_inner_tr, Xt_inner_val = None, None
                y_inner_tr_raw, y_inner_val_raw = y_tr_full_raw[inner_tr_idx], y_tr_full_raw[inner_val_idx]

                Xp_core, Xt_core, y_core_raw, Xp_esv, Xt_esv, y_esv_raw = _make_train_val_from_train(Xp_inner_tr, Xt_inner_tr, y_inner_tr_raw, frac=esv_frac)

                base_core = base_esv = base_val = None
                if target_mode == "log_var_ratio":
                    base_core = _build_baseline_var_from_Xp(Xp_core, y_core_raw, feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
                    base_esv  = _build_baseline_var_from_Xp(Xp_esv,  y_esv_raw,  feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
                    base_val  = _build_baseline_var_from_Xp(Xp_inner_val,  y_inner_val_raw,  feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)

                y_core, ctx_core = _transform_target(y_core_raw, base_core)
                y_esv,  ctx_esv  = _transform_target(y_esv_raw,  base_esv)
                y_val,  ctx_val  = _transform_target(y_inner_val_raw, base_val)

                (Xp_core_s, Xt_core_s, y_core_s,
                 Xp_esv_s, Xt_esv_s, y_esv_s,
                 Xp_val_s, Xt_val_s, y_val_s,
                 _, _, y_scaler_inner) = _fit_apply_scalers(
                    Xp_core, Xt_core, y_core, Xp_esv, Xt_esv, y_esv, Xp_inner_val, Xt_inner_val, y_val,
                    do_norm_X=normalize_X, do_norm_Time=normalize_Time, do_norm_y=normalize_y,
                    y_scaler_kind=y_scale_type
                )

                Xi_tr, yi_tr   = _prep_for_model(Xp_core_s, Xt_core_s, y_core_s, merge_price_time, flatten)
                Xi_esv, yi_esv = _prep_for_model(Xp_esv_s, Xt_esv_s, y_esv_s, merge_price_time, flatten)
                Xi_val, yi_val = _prep_for_model(Xp_val_s, Xt_val_s, y_val_s, merge_price_time, flatten)

                _reset_seeds(random_state, deterministic=deterministic_torch)
                model_inner = safe_clone(model)
                if hasattr(model_inner, "set_params") and param_set:
                    model_inner.set_params(**param_set)

                try:
                    if verbose:
                        print(f"\nSize of the X data for training in the inner fold {inner_fold_idx + 1}: {Xi_tr.shape}")
                        print(f"Size of the Y data for training in the inner fold {inner_fold_idx + 1}: {yi_tr.shape}")

                    model_inner.fit(Xi_tr, yi_tr, Xi_esv, yi_esv)
                    y_val_pred = model_inner.predict(Xi_val)
                    val_metric = _score_block(y_val_pred, yi_val, ctx_val, y_scaler_inner, normalize_y, metric_for_early_stop)
                    fold_val_losses.append(val_metric)
                except Exception as e:
                    if verbose:
                        print(f"Skipped param set {param_set} in inner fold {inner_fold_idx + 1} due to error: {e}")
                    fold_val_losses = [np.inf]; break

            avg_val_loss = np.mean(fold_val_losses)
            if avg_val_loss < best_val_loss:
                best_val_loss = avg_val_loss; best_params = param_set
            if avg_val_loss < best_score:
                best_score = avg_val_loss; best_params_final = param_set

        if best_params is None:
            continue

        Xp_core, Xt_core, y_core_raw, Xp_esv, Xt_esv, y_esv_raw = _make_train_val_from_train(Xp_tr_full, Xt_tr_full, y_tr_full_raw, frac=esv_frac)

        base_core = base_esv = base_te = None
        if target_mode == "log_var_ratio":
            base_core = _build_baseline_var_from_Xp(Xp_core, y_core_raw, feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_esv  = _build_baseline_var_from_Xp(Xp_esv,  y_esv_raw,  feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_te   = _build_baseline_var_from_Xp(Xp_te,   y_te_raw,   feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)

        y_core, ctx_core = _transform_target(y_core_raw, base_core)
        y_esv,  ctx_esv  = _transform_target(y_esv_raw,  base_esv)
        y_te,   ctx_te   = _transform_target(y_te_raw,   base_te)

        (Xp_core_s, Xt_core_s, y_core_s,
         Xp_esv_s, Xt_esv_s, y_esv_s,
         Xp_te_s, Xt_te_s, y_te_s,
         X_price_train_scaler, X_time_train_scaler, scaler_y) = _fit_apply_scalers(
            Xp_core, Xt_core, y_core, Xp_esv, Xt_esv, y_esv, Xp_te, Xt_te, y_te,
            do_norm_X=normalize_X, do_norm_Time=normalize_Time, do_norm_y=normalize_y,
            y_scaler_kind=y_scale_type
        )

        X_train, y_train = _prep_for_model(Xp_core_s, Xt_core_s, y_core_s, merge_price_time, flatten)
        X_val__, y_val__ = _prep_for_model(Xp_esv_s, Xt_esv_s, y_esv_s, merge_price_time, flatten)
        X_test, y_test_scaled = _prep_for_model(Xp_te_s, Xt_te_s, y_te_s, merge_price_time, flatten)

        if verbose:
            print(f"\nSize of the X data for training in the outer fold: {X_train.shape}")
            print(f"Size of the Y data for training in the outer fold: {y_train.shape}")

        _reset_seeds(random_state, deterministic=deterministic_torch)
        model_fold = safe_clone(model)
        if hasattr(model_fold, "set_params") and best_params:
            model_fold.set_params(**best_params)

        model_fold.fit(X_train, y_train, X_val__, y_val__)
        y_pred = model_fold.predict(X_test)

        if flatten:
            y_pred_3d = unflatten_3d_tensor_by_feature_first(y_pred, time_steps_y, num_features_y)
            y_test_scaled_3d = unflatten_3d_tensor_by_feature_first(y_test_scaled, time_steps_y, num_features_y)
        else:
            y_pred_3d = y_pred; y_test_scaled_3d = y_test_scaled

        if normalize_y:
            y_pred_model = inverse_scale_3d(y_pred_3d, scaler_y)
            y_true_model = inverse_scale_3d(y_test_scaled_3d, scaler_y)
        else:
            y_pred_model = y_pred_3d; y_true_model = y_test_scaled_3d

        y_pred_inv = _inverse_transform_target(y_pred_model, ctx_te)
        y_true_inv = _inverse_transform_target(y_true_model, ctx_te)

        for i in range(no_tasks):
            task_y_true = y_true_inv[:, :, i]
            task_y_pred = y_pred_inv[:, :, i]
            metrics = evaluate_predictions(task_y_true, task_y_pred)
            nested_scores[i].append(metrics)

            if verbose:
                print(f"\nOuter Fold {fold_idx + 1} — Task {i + 1} Metrics:")
                for metric_name, metric_value in metrics.items():
                    print(f"  {metric_name:<35}: {metric_value:.4f}" if isinstance(metric_value, float) else f"  {metric_name:<35}: {metric_value}")

    if any(len(v) > 0 for v in nested_scores.values()):
        for task_idx, folds in nested_scores.items():
            if len(folds) == 0: 
                continue
            for metric in folds[0]:
                values = np.array([fold[metric] for fold in folds], dtype=float)
                mean_val = values.mean()
                ci95 = t.interval(0.95, df=len(values)-1, loc=mean_val, scale=values.std(ddof=1)/np.sqrt(len(values)))
                nested_results[f"Task {task_idx+1} — Nested {metric}"] = mean_val
                nested_results[f"Task {task_idx+1} — Nested {metric} 95% CI"] = ci95

        if verbose:
            print("\nNested CV Average Metrics with 95% CI (per task):")
            for k in sorted(nested_results):
                v = nested_results[k]
                if isinstance(v, tuple):
                    print(f"{k:<55}: ({v[0]:.4f}, {v[1]:.4f})")
                elif isinstance(v, float):
                    print(f"{k:<55}: {v:.4f}")
                else:
                    print(f"{k:<55}: {v}")

    if verbose and best_params_final is not None:
        print("\nBest parameters found via nested CV:")
        for k, v in best_params_final.items():
            print(f"  {k}: {v}")
        print("\nRefitting the final model with the best parameters found in the nested CV:")

    if best_params_final is not None:
        if final_refit_holdout_frac < 0 or final_refit_holdout_frac >= 1:
            raise ValueError("final_refit_holdout_frac must be in [0, 1).")
        split_idx = int(round((1 - final_refit_holdout_frac) * len(X_price)))
        if split_idx == len(X_price):
            Xp_tr, Xp_te = X_price, X_price[:0]
            Xt_tr, Xt_te = (X_time, X_time[:0]) if X_time is not None else (None, None)
            y_tr_raw, y_te_raw = y, y[:0]
        else:
            Xp_tr, Xp_te = X_price[:split_idx], X_price[split_idx:]
            Xt_tr, Xt_te = (X_time[:split_idx], X_time[split_idx:]) if X_time is not None else (None, None)
            y_tr_raw,  y_te_raw  = y[:split_idx], y[split_idx:]

        Xp_core, Xt_core, y_core_raw, Xp_esv, Xt_esv, y_esv_raw = _make_train_val_from_train(Xp_tr, Xt_tr, y_tr_raw, frac=esv_frac)

        base_core = base_esv = base_te = None
        if target_mode == "log_var_ratio":
            base_core = _build_baseline_var_from_Xp(Xp_core, y_core_raw, feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_esv  = _build_baseline_var_from_Xp(Xp_esv,  y_esv_raw,  feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps)
            base_te   = _build_baseline_var_from_Xp(Xp_te,   y_te_raw,   feat_idx=baseline_feature_idx, window=baseline_window, eps=baseline_eps) if len(y_te_raw) else None

        y_core, ctx_core = _transform_target(y_core_raw, base_core)
        y_esv,  ctx_esv  = _transform_target(y_esv_raw,  base_esv)
        y_te,   ctx_te   = _transform_target(y_te_raw,   base_te) if len(y_te_raw) else (y_esv, ctx_esv)

        (Xp_core_s, Xt_core_s, y_core_s,
         Xp_esv_s, Xt_esv_s, y_esv_s,
         Xp_te_s, Xt_te_s, y_te_s,
         X_price_train_scaler, X_time_train_scaler, scaler_y) = _fit_apply_scalers(
            Xp_core, Xt_core, y_core, Xp_esv, Xt_esv, y_esv, Xp_te, Xt_te, y_te,
            do_norm_X=normalize_X, do_norm_Time=normalize_Time, do_norm_y=normalize_y,
            y_scaler_kind=y_scale_type
        )

        X_train_final, y_train_final = _prep_for_model(Xp_core_s, Xt_core_s, y_core_s, merge_price_time, flatten)
        X_val_final, y_val_final     = _prep_for_model(Xp_esv_s, Xt_esv_s, y_esv_s, merge_price_time, flatten)
        X_test_final, y_test_final_s = _prep_for_model(Xp_te_s, Xt_te_s, y_te_s, merge_price_time, flatten) if len(y_te_raw) else (None, None)

        best_model_final = safe_clone(model)
        if hasattr(best_model_final, "set_params"):
            best_model_final.set_params(**best_params_final)

        print(f"\nSize of the X data for training: {X_train_final.shape}")
        print(f"Size of the Y data for training: {y_train_final.shape}")

        _reset_seeds(random_state, deterministic=deterministic_torch)
        best_model_final.fit(X_train_final, y_train_final, X_val_final, y_val_final)

        if X_test_final is not None:
            y_pred_final = best_model_final.predict(X_test_final)
            if flatten:
                y_pred_final_3d = unflatten_3d_tensor_by_feature_first(y_pred_final, time_steps_y, num_features_y)
                y_test_final_s_3d = unflatten_3d_tensor_by_feature_first(y_test_final_s, time_steps_y, num_features_y)
            else:
                y_pred_final_3d = y_pred_final; y_test_final_s_3d = y_test_final_s

            if normalize_y:
                y_pred_final_model = inverse_scale_3d(y_pred_final_3d, scaler_y)
                y_true_final_model = inverse_scale_3d(y_test_final_s_3d, scaler_y)
            else:
                y_pred_final_model = y_pred_final_3d; y_true_final_model = y_test_final_s_3d

            y_pred_final_inv = _inverse_transform_target(y_pred_final_model, ctx_te)
            y_true_final_inv = _inverse_transform_target(y_true_final_model, ctx_te)

            if verbose:
                print("\nResults after refitting the final model:")
                for i in range(no_tasks):
                    final_metrics = evaluate_predictions(y_true_final_inv[:, :, i], y_pred_final_inv[:, :, i])
                    print(f"\nTask {i + 1} Final Metrics:")
                    for metric_name, metric_value in final_metrics.items():
                        print(f"  {metric_name:<40}: {metric_value:.4f}" if isinstance(metric_value, float) else f"  {metric_name:<40}: {metric_value}")

            y_task0_true_pred = _build_task0_true_pred(y_true_final_inv, y_pred_final_inv)
            print(f"\nSaved y_true min={np.nanmin(y_task0_true_pred[:,:,0]):.6g}, max={np.nanmax(y_task0_true_pred[:,:,0]):.6g}")
            print(f"Saved y_pred min={np.nanmin(y_task0_true_pred[:,:,1]):.6g}, max={np.nanmax(y_task0_true_pred[:,:,1]):.6g}")
        else:
            y_task0_true_pred = None
            print("\nFinal refit performed on all data; no holdout set to report metrics.")

        if save_model_path and best_model_final is not None:
            os.makedirs(os.path.dirname(save_model_path), exist_ok=True)
            save_dict = {
                "model": best_model_final,
                "results": results,
                "nested_results": nested_results,
                "best_params": best_model_final.get_params() if hasattr(best_model_final, "get_params") else best_params_final,
                "X_price_scaler": X_price_train_scaler if normalize_X else None,
                "X_time_scaler": X_time_train_scaler if normalize_Time else None,
                "y_scaler": scaler_y if normalize_y else None,
                "target_mode": target_mode,
                "baseline_feature_idx": baseline_feature_idx,
                "baseline_window": baseline_window,
            }
            with open(save_model_path, "wb") as f:
                pickle.dump(save_dict, f)
            print(f"\nFinal nested-CV model saved to: {save_model_path}")

    return results, nested_results, best_model_final, best_params_final, y_task0_true_pred

Loading data¶

The data objects can be loaded directly from disk here, so most of the preceding sections do not need to be re-run.

Summarizing helper¶

In [3]:
import numpy as np
import pandas as pd

def summarize_features(arr, col_names=None, name="array"):
    """Print NaN/Inf diagnostics for a 2-D or 3-D feature array and return a
    per-feature summary table sorted by largest absolute observed value.

    Parameters
    ----------
    arr : array-like or None
        2-D (samples, features) or 3-D (batch, time, features) data. If
        None, a notice is printed and None is returned.
    col_names : sequence of str, optional
        Feature labels; auto-generated as ``f0, f1, ...`` when missing or
        when the length does not match the number of features.
    name : str
        Label used in the printed diagnostics.

    Returns
    -------
    pandas.DataFrame or None
        Columns: feature, min, p01, median, p99, max, mean, std.

    Raises
    ------
    ValueError
        If ``arr`` is neither 2-D nor 3-D.
    """
    if arr is None:
        print(f"[{name}] is None")
        return None

    arr = np.asarray(arr)
    # Collapse any leading dimensions so rows are observations, columns features.
    if arr.ndim == 2:
        flat2d = arr
    elif arr.ndim == 3:
        flat2d = arr.reshape(-1, arr.shape[2])
    else:
        raise ValueError(f"[{name}] expected 2D or 3D array; got shape {arr.shape}")

    # Count non-finite entries before any masking.
    nan_count = np.isnan(flat2d).sum()
    pos_inf_count = np.isposinf(flat2d).sum()
    neg_inf_count = np.isneginf(flat2d).sum()
    total_inf = pos_inf_count + neg_inf_count
    print(f"[{name}] shape={arr.shape} | NaN: {nan_count} | +Inf: {pos_inf_count} | -Inf: {neg_inf_count} | Any Inf: {total_inf}")

    # Replace Inf with NaN so the nan-aware reducers below ignore them too.
    masked = np.where(np.isfinite(flat2d), flat2d, np.nan)

    n_features = masked.shape[1]
    if col_names is None or len(col_names) != n_features:
        col_names = [f"f{i}" for i in range(n_features)]

    summary = pd.DataFrame({
        "feature": col_names,
        "min": np.nanmin(masked, axis=0),
        "p01": np.nanpercentile(masked, 1, axis=0),
        "median": np.nanpercentile(masked, 50, axis=0),
        "p99": np.nanpercentile(masked, 99, axis=0),
        "max": np.nanmax(masked, axis=0),
        "mean": np.nanmean(masked, axis=0),
        "std": np.nanstd(masked, axis=0),
    })

    # Rank features by the most extreme observed magnitude, largest first.
    summary["abs_max"] = np.maximum(np.abs(summary["min"]), np.abs(summary["max"]))
    return (
        summary.sort_values("abs_max", ascending=False)
        .drop(columns=["abs_max"])
        .reset_index(drop=True)
    )

Loading regular data¶

In [4]:
import os
import pickle

objects_relative_path = "Saved_objects"
load_data_object_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict.pkl")

# NOTE(review): pickle.load can execute arbitrary code if the file is
# untrusted; acceptable here because the file is produced by this project.
with open(load_data_object_file_path, "rb") as f:
    structured_data_dict = pickle.load(f)

print("Data dictionary loaded successfully.")

# Unpack the per-instrument arrays used by later cells.
aapl_X_price = structured_data_dict["AAPL"].get("X_other")
aapl_X_time = structured_data_dict["AAPL"].get("X_time")
aapl_y = structured_data_dict["AAPL"].get("y")


eur_X_price = structured_data_dict["EURUSD"].get("X_other")
eur_X_time = structured_data_dict["EURUSD"].get("X_time")
eur_y = structured_data_dict["EURUSD"].get("y")
eur_y_columns = structured_data_dict["EURUSD"].get("y_columns")
# Fixed: the column list was accidentally printed twice, doubling the output.
print(eur_y_columns)
Data dictionary loaded successfully.
['rv_t+1', 'lg_return_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'lg_return_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'lg_return_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'lg_return_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'lg_return_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'lg_return_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'lg_return_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'lg_return_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'lg_return_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'lg_return_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'lg_return_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'lg_return_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'lg_return_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'lg_return_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'lg_return_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 
'y_best_nu_tgarch_t+15', 'rv_t+16', 'lg_return_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'lg_return_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 'lg_return_t+18', 'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'lg_return_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'lg_return_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'lg_return_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'lg_return_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'lg_return_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'lg_return_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'lg_return_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'lg_return_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'lg_return_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'lg_return_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'lg_return_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'lg_return_t+30', 
'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'lg_return_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'lg_return_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'lg_return_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'lg_return_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'lg_return_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'lg_return_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'lg_return_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'lg_return_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'lg_return_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'lg_return_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'lg_return_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'lg_return_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'lg_return_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'lg_return_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 
'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'lg_return_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'lg_return_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'lg_return_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'lg_return_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'lg_return_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'lg_return_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'lg_return_t+51', 'y_best_alpha_1_t+51', 'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'lg_return_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'lg_return_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'lg_return_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'lg_return_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'lg_return_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'lg_return_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'lg_return_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 
'lg_return_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'lg_return_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']
['rv_t+1', 'lg_return_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'lg_return_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'lg_return_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'lg_return_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'lg_return_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'lg_return_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'lg_return_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'lg_return_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'lg_return_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'lg_return_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'lg_return_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'lg_return_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'lg_return_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'lg_return_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'lg_return_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 
'y_best_nu_tgarch_t+15', 'rv_t+16', 'lg_return_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'lg_return_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 'lg_return_t+18', 'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'lg_return_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'lg_return_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'lg_return_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'lg_return_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'lg_return_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'lg_return_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'lg_return_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'lg_return_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'lg_return_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'lg_return_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'lg_return_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'lg_return_t+30', 
'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'lg_return_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'lg_return_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'lg_return_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'lg_return_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'lg_return_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'lg_return_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'lg_return_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'lg_return_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'lg_return_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'lg_return_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'lg_return_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'lg_return_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'lg_return_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'lg_return_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 
'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'lg_return_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'lg_return_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'lg_return_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'lg_return_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'lg_return_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'lg_return_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'lg_return_t+51', 'y_best_alpha_1_t+51', 'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'lg_return_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'lg_return_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'lg_return_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'lg_return_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'lg_return_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'lg_return_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'lg_return_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 
'lg_return_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'lg_return_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']

Loading data for hierarchical tasks with no additional X feature¶

In [5]:
import numpy as np

# Second structured dataset (see the section header above).
load_data_object_2_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_2.pkl"
)

with open(load_data_object_2_file_path, "rb") as f:
    structured_data_dict_2 = pickle.load(f)

print("Data dictionary 2 loaded successfully.")

# Unpack the EURUSD entry into the names used by later cells.
eur2_X_price, eur2_X_time, eur2_y, eur2_y_columns, eur2_X_columns = (
    structured_data_dict_2["EURUSD"].get(key)
    for key in ("X_other", "X_time", "y", "y_columns", "X_other_columns")
)

print(eur2_X_columns)

print(eur2_y[:,0,0])
Data dictionary 2 loaded successfully.
['rv_t-59', 'x_best_alpha_1_t-59', 'x_best_beta_1_t-59', 'x_best_omega_t-59', 'x_best_mu_t-59', 'x_best_nu_tgarch_t-59', 'rv_t-58', 'x_best_alpha_1_t-58', 'x_best_beta_1_t-58', 'x_best_omega_t-58', 'x_best_mu_t-58', 'x_best_nu_tgarch_t-58', 'rv_t-57', 'x_best_alpha_1_t-57', 'x_best_beta_1_t-57', 'x_best_omega_t-57', 'x_best_mu_t-57', 'x_best_nu_tgarch_t-57', 'rv_t-56', 'x_best_alpha_1_t-56', 'x_best_beta_1_t-56', 'x_best_omega_t-56', 'x_best_mu_t-56', 'x_best_nu_tgarch_t-56', 'rv_t-55', 'x_best_alpha_1_t-55', 'x_best_beta_1_t-55', 'x_best_omega_t-55', 'x_best_mu_t-55', 'x_best_nu_tgarch_t-55', 'rv_t-54', 'x_best_alpha_1_t-54', 'x_best_beta_1_t-54', 'x_best_omega_t-54', 'x_best_mu_t-54', 'x_best_nu_tgarch_t-54', 'rv_t-53', 'x_best_alpha_1_t-53', 'x_best_beta_1_t-53', 'x_best_omega_t-53', 'x_best_mu_t-53', 'x_best_nu_tgarch_t-53', 'rv_t-52', 'x_best_alpha_1_t-52', 'x_best_beta_1_t-52', 'x_best_omega_t-52', 'x_best_mu_t-52', 'x_best_nu_tgarch_t-52', 'rv_t-51', 'x_best_alpha_1_t-51', 'x_best_beta_1_t-51', 'x_best_omega_t-51', 'x_best_mu_t-51', 'x_best_nu_tgarch_t-51', 'rv_t-50', 'x_best_alpha_1_t-50', 'x_best_beta_1_t-50', 'x_best_omega_t-50', 'x_best_mu_t-50', 'x_best_nu_tgarch_t-50', 'rv_t-49', 'x_best_alpha_1_t-49', 'x_best_beta_1_t-49', 'x_best_omega_t-49', 'x_best_mu_t-49', 'x_best_nu_tgarch_t-49', 'rv_t-48', 'x_best_alpha_1_t-48', 'x_best_beta_1_t-48', 'x_best_omega_t-48', 'x_best_mu_t-48', 'x_best_nu_tgarch_t-48', 'rv_t-47', 'x_best_alpha_1_t-47', 'x_best_beta_1_t-47', 'x_best_omega_t-47', 'x_best_mu_t-47', 'x_best_nu_tgarch_t-47', 'rv_t-46', 'x_best_alpha_1_t-46', 'x_best_beta_1_t-46', 'x_best_omega_t-46', 'x_best_mu_t-46', 'x_best_nu_tgarch_t-46', 'rv_t-45', 'x_best_alpha_1_t-45', 'x_best_beta_1_t-45', 'x_best_omega_t-45', 'x_best_mu_t-45', 'x_best_nu_tgarch_t-45', 'rv_t-44', 'x_best_alpha_1_t-44', 'x_best_beta_1_t-44', 'x_best_omega_t-44', 'x_best_mu_t-44', 'x_best_nu_tgarch_t-44', 'rv_t-43', 'x_best_alpha_1_t-43', 'x_best_beta_1_t-43', 'x_best_omega_t-43', 
'x_best_mu_t-43', 'x_best_nu_tgarch_t-43', 'rv_t-42', 'x_best_alpha_1_t-42', 'x_best_beta_1_t-42', 'x_best_omega_t-42', 'x_best_mu_t-42', 'x_best_nu_tgarch_t-42', 'rv_t-41', 'x_best_alpha_1_t-41', 'x_best_beta_1_t-41', 'x_best_omega_t-41', 'x_best_mu_t-41', 'x_best_nu_tgarch_t-41', 'rv_t-40', 'x_best_alpha_1_t-40', 'x_best_beta_1_t-40', 'x_best_omega_t-40', 'x_best_mu_t-40', 'x_best_nu_tgarch_t-40', 'rv_t-39', 'x_best_alpha_1_t-39', 'x_best_beta_1_t-39', 'x_best_omega_t-39', 'x_best_mu_t-39', 'x_best_nu_tgarch_t-39', 'rv_t-38', 'x_best_alpha_1_t-38', 'x_best_beta_1_t-38', 'x_best_omega_t-38', 'x_best_mu_t-38', 'x_best_nu_tgarch_t-38', 'rv_t-37', 'x_best_alpha_1_t-37', 'x_best_beta_1_t-37', 'x_best_omega_t-37', 'x_best_mu_t-37', 'x_best_nu_tgarch_t-37', 'rv_t-36', 'x_best_alpha_1_t-36', 'x_best_beta_1_t-36', 'x_best_omega_t-36', 'x_best_mu_t-36', 'x_best_nu_tgarch_t-36', 'rv_t-35', 'x_best_alpha_1_t-35', 'x_best_beta_1_t-35', 'x_best_omega_t-35', 'x_best_mu_t-35', 'x_best_nu_tgarch_t-35', 'rv_t-34', 'x_best_alpha_1_t-34', 'x_best_beta_1_t-34', 'x_best_omega_t-34', 'x_best_mu_t-34', 'x_best_nu_tgarch_t-34', 'rv_t-33', 'x_best_alpha_1_t-33', 'x_best_beta_1_t-33', 'x_best_omega_t-33', 'x_best_mu_t-33', 'x_best_nu_tgarch_t-33', 'rv_t-32', 'x_best_alpha_1_t-32', 'x_best_beta_1_t-32', 'x_best_omega_t-32', 'x_best_mu_t-32', 'x_best_nu_tgarch_t-32', 'rv_t-31', 'x_best_alpha_1_t-31', 'x_best_beta_1_t-31', 'x_best_omega_t-31', 'x_best_mu_t-31', 'x_best_nu_tgarch_t-31', 'rv_t-30', 'x_best_alpha_1_t-30', 'x_best_beta_1_t-30', 'x_best_omega_t-30', 'x_best_mu_t-30', 'x_best_nu_tgarch_t-30', 'rv_t-29', 'x_best_alpha_1_t-29', 'x_best_beta_1_t-29', 'x_best_omega_t-29', 'x_best_mu_t-29', 'x_best_nu_tgarch_t-29', 'rv_t-28', 'x_best_alpha_1_t-28', 'x_best_beta_1_t-28', 'x_best_omega_t-28', 'x_best_mu_t-28', 'x_best_nu_tgarch_t-28', 'rv_t-27', 'x_best_alpha_1_t-27', 'x_best_beta_1_t-27', 'x_best_omega_t-27', 'x_best_mu_t-27', 'x_best_nu_tgarch_t-27', 'rv_t-26', 'x_best_alpha_1_t-26', 
'x_best_beta_1_t-26', 'x_best_omega_t-26', 'x_best_mu_t-26', 'x_best_nu_tgarch_t-26', 'rv_t-25', 'x_best_alpha_1_t-25', 'x_best_beta_1_t-25', 'x_best_omega_t-25', 'x_best_mu_t-25', 'x_best_nu_tgarch_t-25', 'rv_t-24', 'x_best_alpha_1_t-24', 'x_best_beta_1_t-24', 'x_best_omega_t-24', 'x_best_mu_t-24', 'x_best_nu_tgarch_t-24', 'rv_t-23', 'x_best_alpha_1_t-23', 'x_best_beta_1_t-23', 'x_best_omega_t-23', 'x_best_mu_t-23', 'x_best_nu_tgarch_t-23', 'rv_t-22', 'x_best_alpha_1_t-22', 'x_best_beta_1_t-22', 'x_best_omega_t-22', 'x_best_mu_t-22', 'x_best_nu_tgarch_t-22', 'rv_t-21', 'x_best_alpha_1_t-21', 'x_best_beta_1_t-21', 'x_best_omega_t-21', 'x_best_mu_t-21', 'x_best_nu_tgarch_t-21', 'rv_t-20', 'x_best_alpha_1_t-20', 'x_best_beta_1_t-20', 'x_best_omega_t-20', 'x_best_mu_t-20', 'x_best_nu_tgarch_t-20', 'rv_t-19', 'x_best_alpha_1_t-19', 'x_best_beta_1_t-19', 'x_best_omega_t-19', 'x_best_mu_t-19', 'x_best_nu_tgarch_t-19', 'rv_t-18', 'x_best_alpha_1_t-18', 'x_best_beta_1_t-18', 'x_best_omega_t-18', 'x_best_mu_t-18', 'x_best_nu_tgarch_t-18', 'rv_t-17', 'x_best_alpha_1_t-17', 'x_best_beta_1_t-17', 'x_best_omega_t-17', 'x_best_mu_t-17', 'x_best_nu_tgarch_t-17', 'rv_t-16', 'x_best_alpha_1_t-16', 'x_best_beta_1_t-16', 'x_best_omega_t-16', 'x_best_mu_t-16', 'x_best_nu_tgarch_t-16', 'rv_t-15', 'x_best_alpha_1_t-15', 'x_best_beta_1_t-15', 'x_best_omega_t-15', 'x_best_mu_t-15', 'x_best_nu_tgarch_t-15', 'rv_t-14', 'x_best_alpha_1_t-14', 'x_best_beta_1_t-14', 'x_best_omega_t-14', 'x_best_mu_t-14', 'x_best_nu_tgarch_t-14', 'rv_t-13', 'x_best_alpha_1_t-13', 'x_best_beta_1_t-13', 'x_best_omega_t-13', 'x_best_mu_t-13', 'x_best_nu_tgarch_t-13', 'rv_t-12', 'x_best_alpha_1_t-12', 'x_best_beta_1_t-12', 'x_best_omega_t-12', 'x_best_mu_t-12', 'x_best_nu_tgarch_t-12', 'rv_t-11', 'x_best_alpha_1_t-11', 'x_best_beta_1_t-11', 'x_best_omega_t-11', 'x_best_mu_t-11', 'x_best_nu_tgarch_t-11', 'rv_t-10', 'x_best_alpha_1_t-10', 'x_best_beta_1_t-10', 'x_best_omega_t-10', 'x_best_mu_t-10', 
'x_best_nu_tgarch_t-10', 'rv_t-9', 'x_best_alpha_1_t-9', 'x_best_beta_1_t-9', 'x_best_omega_t-9', 'x_best_mu_t-9', 'x_best_nu_tgarch_t-9', 'rv_t-8', 'x_best_alpha_1_t-8', 'x_best_beta_1_t-8', 'x_best_omega_t-8', 'x_best_mu_t-8', 'x_best_nu_tgarch_t-8', 'rv_t-7', 'x_best_alpha_1_t-7', 'x_best_beta_1_t-7', 'x_best_omega_t-7', 'x_best_mu_t-7', 'x_best_nu_tgarch_t-7', 'rv_t-6', 'x_best_alpha_1_t-6', 'x_best_beta_1_t-6', 'x_best_omega_t-6', 'x_best_mu_t-6', 'x_best_nu_tgarch_t-6', 'rv_t-5', 'x_best_alpha_1_t-5', 'x_best_beta_1_t-5', 'x_best_omega_t-5', 'x_best_mu_t-5', 'x_best_nu_tgarch_t-5', 'rv_t-4', 'x_best_alpha_1_t-4', 'x_best_beta_1_t-4', 'x_best_omega_t-4', 'x_best_mu_t-4', 'x_best_nu_tgarch_t-4', 'rv_t-3', 'x_best_alpha_1_t-3', 'x_best_beta_1_t-3', 'x_best_omega_t-3', 'x_best_mu_t-3', 'x_best_nu_tgarch_t-3', 'rv_t-2', 'x_best_alpha_1_t-2', 'x_best_beta_1_t-2', 'x_best_omega_t-2', 'x_best_mu_t-2', 'x_best_nu_tgarch_t-2', 'rv_t-1', 'x_best_alpha_1_t-1', 'x_best_beta_1_t-1', 'x_best_omega_t-1', 'x_best_mu_t-1', 'x_best_nu_tgarch_t-1', 'rv_t-0', 'x_best_alpha_1_t-0', 'x_best_beta_1_t-0', 'x_best_omega_t-0', 'x_best_mu_t-0', 'x_best_nu_tgarch_t-0']
[0.10789115 0.0094245  0.12050298 ... 0.35689057 0.31497138 0.32454847]

Loading a third type of data¶

In [6]:
import numpy as np

# Location of the pickled "third type" structured data dictionary.
load_data_object_3_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_3.pkl"
)

# Deserialize the dictionary (pickle.load is only safe on trusted files).
with open(load_data_object_3_file_path, "rb") as f:
    structured_data_dict_3 = pickle.load(f)

print("Data dictionary 3 loaded successfully.")

# Unpack the EURUSD entry: inputs, time features, targets, and column labels.
eur3_entry = structured_data_dict_3["EURUSD"]
eur3_X_price = eur3_entry.get("X_other")
eur3_X_time = eur3_entry.get("X_time")
eur3_y = eur3_entry.get("y")
eur3_y_columns = eur3_entry.get("y_columns")
eur3_X_columns = eur3_entry.get("X_other_columns")

# Sanity-print shapes, column names, and the first target series.
print(eur3_X_price.shape)
print(eur3_y.shape)
print(eur3_X_columns)
print(eur3_y_columns)

print(eur3_y[:, 0, 0])
Data dictionary 3 loaded successfully.
(3782, 60, 1)
(3782, 60, 7)
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'lg_return_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'lg_return_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'lg_return_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'lg_return_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'lg_return_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'lg_return_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'lg_return_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'lg_return_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'lg_return_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'lg_return_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'lg_return_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'lg_return_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'lg_return_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'lg_return_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'lg_return_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 
'y_best_nu_tgarch_t+15', 'rv_t+16', 'lg_return_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'lg_return_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 'lg_return_t+18', 'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'lg_return_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'lg_return_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'lg_return_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'lg_return_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'lg_return_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'lg_return_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'lg_return_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'lg_return_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'lg_return_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'lg_return_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'lg_return_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'lg_return_t+30', 
'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'lg_return_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'lg_return_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'lg_return_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'lg_return_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'lg_return_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'lg_return_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'lg_return_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'lg_return_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'lg_return_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'lg_return_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'lg_return_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'lg_return_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'lg_return_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'lg_return_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 
'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'lg_return_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'lg_return_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'lg_return_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'lg_return_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'lg_return_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'lg_return_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'lg_return_t+51', 'y_best_alpha_1_t+51', 'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'lg_return_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'lg_return_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'lg_return_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'lg_return_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'lg_return_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'lg_return_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'lg_return_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 
'lg_return_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'lg_return_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']
[0.10789115 0.0094245  0.12050298 ... 0.35689057 0.31497138 0.32454847]

Loading a small data set¶

In [7]:
# Load the same third-format dictionary and keep only a small 200-sample slice
# (handy for quick smoke tests of model code).
load_data_object_3_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_3.pkl")

with open(load_data_object_3_file_path, "rb") as f:
    structured_data_dict_3 = pickle.load(f)

# BUG FIX: the message previously said dictionary "4" although dictionary 3
# is what this cell loads.
print("Data dictionary 3 loaded successfully.")

# First 200 samples of each array; the column-name lists describe all samples,
# so they need no slicing.
eur3s_X_price = structured_data_dict_3["EURUSD"]["X_other"][:200]
eur3s_X_time  = structured_data_dict_3["EURUSD"]["X_time"][:200]
eur3s_y       = structured_data_dict_3["EURUSD"]["y"][:200]
eur3s_y_columns = structured_data_dict_3["EURUSD"].get("y_columns")
eur3s_X_columns = structured_data_dict_3["EURUSD"].get("X_other_columns")

print(eur3s_X_columns)
print(eur3s_y_columns)
Data dictionary 4 loaded successfully.
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'lg_return_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'lg_return_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'lg_return_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'lg_return_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'lg_return_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'lg_return_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'lg_return_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'lg_return_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'lg_return_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'lg_return_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'lg_return_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'lg_return_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'lg_return_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'lg_return_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'lg_return_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 
'y_best_nu_tgarch_t+15', 'rv_t+16', 'lg_return_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'lg_return_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 'lg_return_t+18', 'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'lg_return_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'lg_return_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'lg_return_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'lg_return_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'lg_return_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'lg_return_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'lg_return_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'lg_return_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'lg_return_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'lg_return_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'lg_return_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'lg_return_t+30', 
'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'lg_return_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'lg_return_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'lg_return_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'lg_return_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'lg_return_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'lg_return_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'lg_return_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'lg_return_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'lg_return_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'lg_return_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'lg_return_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'lg_return_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'lg_return_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'lg_return_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 
'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'lg_return_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'lg_return_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'lg_return_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'lg_return_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'lg_return_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'lg_return_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'lg_return_t+51', 'y_best_alpha_1_t+51', 'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'lg_return_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'lg_return_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'lg_return_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'lg_return_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'lg_return_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'lg_return_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'lg_return_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 
'lg_return_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'lg_return_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']

The fourth type of data — compatible with TimesNet and iTransformer¶

In [8]:
# Read the fourth-format data dictionary (per the heading, the layout
# compatible with TimesNet / iTransformer).
load_data_object_4_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_4.pkl"
)

with open(load_data_object_4_file_path, "rb") as f:
    structured_data_dict_4 = pickle.load(f)

print("Data dictionary 4 loaded successfully.")

# Extract the EURUSD arrays and their column labels.
eur4_entry = structured_data_dict_4["EURUSD"]
eur4_X_price = eur4_entry.get("X_other")
eur4_X_time = eur4_entry.get("X_time")
eur4_y = eur4_entry.get("y")
eur4_y_columns = eur4_entry.get("y_columns")
eur4_X_columns = eur4_entry.get("X_other_columns")

# Show the column layout and the first target series.
print(eur4_X_columns)
print(eur4_y_columns)

print(eur4_y[:, 0, 0])
Data dictionary 4 loaded successfully.
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'lg_return_t+1', 'rv_t+2', 'lg_return_t+2', 'rv_t+3', 'lg_return_t+3', 'rv_t+4', 'lg_return_t+4', 'rv_t+5', 'lg_return_t+5', 'rv_t+6', 'lg_return_t+6', 'rv_t+7', 'lg_return_t+7', 'rv_t+8', 'lg_return_t+8', 'rv_t+9', 'lg_return_t+9', 'rv_t+10', 'lg_return_t+10', 'rv_t+11', 'lg_return_t+11', 'rv_t+12', 'lg_return_t+12', 'rv_t+13', 'lg_return_t+13', 'rv_t+14', 'lg_return_t+14', 'rv_t+15', 'lg_return_t+15', 'rv_t+16', 'lg_return_t+16', 'rv_t+17', 'lg_return_t+17', 'rv_t+18', 'lg_return_t+18', 'rv_t+19', 'lg_return_t+19', 'rv_t+20', 'lg_return_t+20', 'rv_t+21', 'lg_return_t+21', 'rv_t+22', 'lg_return_t+22', 'rv_t+23', 'lg_return_t+23', 'rv_t+24', 'lg_return_t+24', 'rv_t+25', 'lg_return_t+25', 'rv_t+26', 'lg_return_t+26', 'rv_t+27', 'lg_return_t+27', 'rv_t+28', 'lg_return_t+28', 'rv_t+29', 'lg_return_t+29', 'rv_t+30', 'lg_return_t+30', 'rv_t+31', 'lg_return_t+31', 'rv_t+32', 'lg_return_t+32', 'rv_t+33', 'lg_return_t+33', 'rv_t+34', 'lg_return_t+34', 'rv_t+35', 'lg_return_t+35', 'rv_t+36', 'lg_return_t+36', 'rv_t+37', 'lg_return_t+37', 'rv_t+38', 'lg_return_t+38', 'rv_t+39', 'lg_return_t+39', 'rv_t+40', 'lg_return_t+40', 'rv_t+41', 'lg_return_t+41', 'rv_t+42', 'lg_return_t+42', 'rv_t+43', 'lg_return_t+43', 'rv_t+44', 'lg_return_t+44', 'rv_t+45', 'lg_return_t+45', 'rv_t+46', 'lg_return_t+46', 'rv_t+47', 'lg_return_t+47', 'rv_t+48', 'lg_return_t+48', 'rv_t+49', 'lg_return_t+49', 'rv_t+50', 'lg_return_t+50', 'rv_t+51', 'lg_return_t+51', 'rv_t+52', 'lg_return_t+52', 'rv_t+53', 'lg_return_t+53', 'rv_t+54', 'lg_return_t+54', 'rv_t+55', 'lg_return_t+55', 'rv_t+56', 'lg_return_t+56', 'rv_t+57', 'lg_return_t+57', 'rv_t+58', 'lg_return_t+58', 'rv_t+59', 'lg_return_t+59', 'rv_t+60', 'lg_return_t+60']
[0.10789115 0.0094245  0.12050298 ... 0.35689057 0.31497138 0.32454847]

APPLE Test data¶

In [9]:
# Re-open the fourth-format dictionary, this time to inspect the AAPL entry.
# (The file is the same one used for EURUSD; it is reloaded here so the cell
# is self-contained.)
load_data_object_4_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_4.pkl"
)

with open(load_data_object_4_file_path, "rb") as f:
    structured_data_dict_4 = pickle.load(f)

print("Data dictionary 4 loaded successfully.")

# Pull the AAPL arrays and column labels from the dictionary.
appl4_entry = structured_data_dict_4["AAPL"]
appl4_X_price = appl4_entry.get("X_other")
appl4_X_time = appl4_entry.get("X_time")
appl4_y = appl4_entry.get("y")
appl4_y_columns = appl4_entry.get("y_columns")
appl4_X_columns = appl4_entry.get("X_other_columns")

# Show the column layout and the first target series.
print(appl4_X_columns)
print(appl4_y_columns)

print(appl4_y[:, 0, 0])
Data dictionary 4 loaded successfully.
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'lg_return_t+1', 'rv_t+2', 'lg_return_t+2', 'rv_t+3', 'lg_return_t+3', 'rv_t+4', 'lg_return_t+4', 'rv_t+5', 'lg_return_t+5', 'rv_t+6', 'lg_return_t+6', 'rv_t+7', 'lg_return_t+7', 'rv_t+8', 'lg_return_t+8', 'rv_t+9', 'lg_return_t+9', 'rv_t+10', 'lg_return_t+10', 'rv_t+11', 'lg_return_t+11', 'rv_t+12', 'lg_return_t+12', 'rv_t+13', 'lg_return_t+13', 'rv_t+14', 'lg_return_t+14', 'rv_t+15', 'lg_return_t+15', 'rv_t+16', 'lg_return_t+16', 'rv_t+17', 'lg_return_t+17', 'rv_t+18', 'lg_return_t+18', 'rv_t+19', 'lg_return_t+19', 'rv_t+20', 'lg_return_t+20', 'rv_t+21', 'lg_return_t+21', 'rv_t+22', 'lg_return_t+22', 'rv_t+23', 'lg_return_t+23', 'rv_t+24', 'lg_return_t+24', 'rv_t+25', 'lg_return_t+25', 'rv_t+26', 'lg_return_t+26', 'rv_t+27', 'lg_return_t+27', 'rv_t+28', 'lg_return_t+28', 'rv_t+29', 'lg_return_t+29', 'rv_t+30', 'lg_return_t+30', 'rv_t+31', 'lg_return_t+31', 'rv_t+32', 'lg_return_t+32', 'rv_t+33', 'lg_return_t+33', 'rv_t+34', 'lg_return_t+34', 'rv_t+35', 'lg_return_t+35', 'rv_t+36', 'lg_return_t+36', 'rv_t+37', 'lg_return_t+37', 'rv_t+38', 'lg_return_t+38', 'rv_t+39', 'lg_return_t+39', 'rv_t+40', 'lg_return_t+40', 'rv_t+41', 'lg_return_t+41', 'rv_t+42', 'lg_return_t+42', 'rv_t+43', 'lg_return_t+43', 'rv_t+44', 'lg_return_t+44', 'rv_t+45', 'lg_return_t+45', 'rv_t+46', 'lg_return_t+46', 'rv_t+47', 'lg_return_t+47', 'rv_t+48', 'lg_return_t+48', 'rv_t+49', 'lg_return_t+49', 'rv_t+50', 'lg_return_t+50', 'rv_t+51', 'lg_return_t+51', 'rv_t+52', 'lg_return_t+52', 'rv_t+53', 'lg_return_t+53', 'rv_t+54', 'lg_return_t+54', 'rv_t+55', 'lg_return_t+55', 'rv_t+56', 'lg_return_t+56', 'rv_t+57', 'lg_return_t+57', 'rv_t+58', 'lg_return_t+58', 'rv_t+59', 'lg_return_t+59', 'rv_t+60', 'lg_return_t+60']
[  0.44148683   1.36512504   0.86625199 ... 112.40750435  30.95861336
  16.24492026]

Fifth type of data¶

In [10]:
# Read the fifth-format data dictionary.
load_data_object_5_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_5.pkl"
)

with open(load_data_object_5_file_path, "rb") as f:
    structured_data_dict_5 = pickle.load(f)

print("Data dictionary 5 loaded successfully.")

# Unpack the EURUSD arrays and column labels.
eur5_entry = structured_data_dict_5["EURUSD"]
eur5_X_price = eur5_entry.get("X_other")
eur5_X_time = eur5_entry.get("X_time")
eur5_y = eur5_entry.get("y")
eur5_y_columns = eur5_entry.get("y_columns")
eur5_X_columns = eur5_entry.get("X_other_columns")

# Show the input and target column layouts.
print(eur5_X_columns)
print(eur5_y_columns)
Data dictionary 5 loaded successfully.
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'rv_t+2', 'rv_t+3', 'rv_t+4', 'rv_t+5', 'rv_t+6', 'rv_t+7', 'rv_t+8', 'rv_t+9', 'rv_t+10', 'rv_t+11', 'rv_t+12', 'rv_t+13', 'rv_t+14', 'rv_t+15', 'rv_t+16', 'rv_t+17', 'rv_t+18', 'rv_t+19', 'rv_t+20', 'rv_t+21', 'rv_t+22', 'rv_t+23', 'rv_t+24', 'rv_t+25', 'rv_t+26', 'rv_t+27', 'rv_t+28', 'rv_t+29', 'rv_t+30', 'rv_t+31', 'rv_t+32', 'rv_t+33', 'rv_t+34', 'rv_t+35', 'rv_t+36', 'rv_t+37', 'rv_t+38', 'rv_t+39', 'rv_t+40', 'rv_t+41', 'rv_t+42', 'rv_t+43', 'rv_t+44', 'rv_t+45', 'rv_t+46', 'rv_t+47', 'rv_t+48', 'rv_t+49', 'rv_t+50', 'rv_t+51', 'rv_t+52', 'rv_t+53', 'rv_t+54', 'rv_t+55', 'rv_t+56', 'rv_t+57', 'rv_t+58', 'rv_t+59', 'rv_t+60']

Sixth type of data¶

In [11]:
# Read the sixth-format data dictionary.
load_data_object_6_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_6.pkl"
)

with open(load_data_object_6_file_path, "rb") as f:
    structured_data_dict_6 = pickle.load(f)

print("Data dictionary 6 loaded successfully.")

# Unpack the EURUSD arrays and column labels.
eur6_entry = structured_data_dict_6["EURUSD"]
eur6_X_price = eur6_entry.get("X_other")
eur6_X_time = eur6_entry.get("X_time")
eur6_y = eur6_entry.get("y")
eur6_y_columns = eur6_entry.get("y_columns")
eur6_X_columns = eur6_entry.get("X_other_columns")

print(eur6_X_columns)
print(eur6_y_columns)

# Peek at a small slice of the inputs: first 5 samples, first time step,
# features 1-2.
print(eur6_X_price[0:5, 0, 1:3])
Data dictionary 6 loaded successfully.
['rv_t-59', 'x_best_alpha_1_t-59', 'x_best_beta_1_t-59', 'x_best_omega_t-59', 'x_best_mu_t-59', 'x_best_nu_tgarch_t-59', 'rv_t-58', 'x_best_alpha_1_t-58', 'x_best_beta_1_t-58', 'x_best_omega_t-58', 'x_best_mu_t-58', 'x_best_nu_tgarch_t-58', 'rv_t-57', 'x_best_alpha_1_t-57', 'x_best_beta_1_t-57', 'x_best_omega_t-57', 'x_best_mu_t-57', 'x_best_nu_tgarch_t-57', 'rv_t-56', 'x_best_alpha_1_t-56', 'x_best_beta_1_t-56', 'x_best_omega_t-56', 'x_best_mu_t-56', 'x_best_nu_tgarch_t-56', 'rv_t-55', 'x_best_alpha_1_t-55', 'x_best_beta_1_t-55', 'x_best_omega_t-55', 'x_best_mu_t-55', 'x_best_nu_tgarch_t-55', 'rv_t-54', 'x_best_alpha_1_t-54', 'x_best_beta_1_t-54', 'x_best_omega_t-54', 'x_best_mu_t-54', 'x_best_nu_tgarch_t-54', 'rv_t-53', 'x_best_alpha_1_t-53', 'x_best_beta_1_t-53', 'x_best_omega_t-53', 'x_best_mu_t-53', 'x_best_nu_tgarch_t-53', 'rv_t-52', 'x_best_alpha_1_t-52', 'x_best_beta_1_t-52', 'x_best_omega_t-52', 'x_best_mu_t-52', 'x_best_nu_tgarch_t-52', 'rv_t-51', 'x_best_alpha_1_t-51', 'x_best_beta_1_t-51', 'x_best_omega_t-51', 'x_best_mu_t-51', 'x_best_nu_tgarch_t-51', 'rv_t-50', 'x_best_alpha_1_t-50', 'x_best_beta_1_t-50', 'x_best_omega_t-50', 'x_best_mu_t-50', 'x_best_nu_tgarch_t-50', 'rv_t-49', 'x_best_alpha_1_t-49', 'x_best_beta_1_t-49', 'x_best_omega_t-49', 'x_best_mu_t-49', 'x_best_nu_tgarch_t-49', 'rv_t-48', 'x_best_alpha_1_t-48', 'x_best_beta_1_t-48', 'x_best_omega_t-48', 'x_best_mu_t-48', 'x_best_nu_tgarch_t-48', 'rv_t-47', 'x_best_alpha_1_t-47', 'x_best_beta_1_t-47', 'x_best_omega_t-47', 'x_best_mu_t-47', 'x_best_nu_tgarch_t-47', 'rv_t-46', 'x_best_alpha_1_t-46', 'x_best_beta_1_t-46', 'x_best_omega_t-46', 'x_best_mu_t-46', 'x_best_nu_tgarch_t-46', 'rv_t-45', 'x_best_alpha_1_t-45', 'x_best_beta_1_t-45', 'x_best_omega_t-45', 'x_best_mu_t-45', 'x_best_nu_tgarch_t-45', 'rv_t-44', 'x_best_alpha_1_t-44', 'x_best_beta_1_t-44', 'x_best_omega_t-44', 'x_best_mu_t-44', 'x_best_nu_tgarch_t-44', 'rv_t-43', 'x_best_alpha_1_t-43', 'x_best_beta_1_t-43', 'x_best_omega_t-43', 
'x_best_mu_t-43', 'x_best_nu_tgarch_t-43', 'rv_t-42', 'x_best_alpha_1_t-42', 'x_best_beta_1_t-42', 'x_best_omega_t-42', 'x_best_mu_t-42', 'x_best_nu_tgarch_t-42', 'rv_t-41', 'x_best_alpha_1_t-41', 'x_best_beta_1_t-41', 'x_best_omega_t-41', 'x_best_mu_t-41', 'x_best_nu_tgarch_t-41', 'rv_t-40', 'x_best_alpha_1_t-40', 'x_best_beta_1_t-40', 'x_best_omega_t-40', 'x_best_mu_t-40', 'x_best_nu_tgarch_t-40', 'rv_t-39', 'x_best_alpha_1_t-39', 'x_best_beta_1_t-39', 'x_best_omega_t-39', 'x_best_mu_t-39', 'x_best_nu_tgarch_t-39', 'rv_t-38', 'x_best_alpha_1_t-38', 'x_best_beta_1_t-38', 'x_best_omega_t-38', 'x_best_mu_t-38', 'x_best_nu_tgarch_t-38', 'rv_t-37', 'x_best_alpha_1_t-37', 'x_best_beta_1_t-37', 'x_best_omega_t-37', 'x_best_mu_t-37', 'x_best_nu_tgarch_t-37', 'rv_t-36', 'x_best_alpha_1_t-36', 'x_best_beta_1_t-36', 'x_best_omega_t-36', 'x_best_mu_t-36', 'x_best_nu_tgarch_t-36', 'rv_t-35', 'x_best_alpha_1_t-35', 'x_best_beta_1_t-35', 'x_best_omega_t-35', 'x_best_mu_t-35', 'x_best_nu_tgarch_t-35', 'rv_t-34', 'x_best_alpha_1_t-34', 'x_best_beta_1_t-34', 'x_best_omega_t-34', 'x_best_mu_t-34', 'x_best_nu_tgarch_t-34', 'rv_t-33', 'x_best_alpha_1_t-33', 'x_best_beta_1_t-33', 'x_best_omega_t-33', 'x_best_mu_t-33', 'x_best_nu_tgarch_t-33', 'rv_t-32', 'x_best_alpha_1_t-32', 'x_best_beta_1_t-32', 'x_best_omega_t-32', 'x_best_mu_t-32', 'x_best_nu_tgarch_t-32', 'rv_t-31', 'x_best_alpha_1_t-31', 'x_best_beta_1_t-31', 'x_best_omega_t-31', 'x_best_mu_t-31', 'x_best_nu_tgarch_t-31', 'rv_t-30', 'x_best_alpha_1_t-30', 'x_best_beta_1_t-30', 'x_best_omega_t-30', 'x_best_mu_t-30', 'x_best_nu_tgarch_t-30', 'rv_t-29', 'x_best_alpha_1_t-29', 'x_best_beta_1_t-29', 'x_best_omega_t-29', 'x_best_mu_t-29', 'x_best_nu_tgarch_t-29', 'rv_t-28', 'x_best_alpha_1_t-28', 'x_best_beta_1_t-28', 'x_best_omega_t-28', 'x_best_mu_t-28', 'x_best_nu_tgarch_t-28', 'rv_t-27', 'x_best_alpha_1_t-27', 'x_best_beta_1_t-27', 'x_best_omega_t-27', 'x_best_mu_t-27', 'x_best_nu_tgarch_t-27', 'rv_t-26', 'x_best_alpha_1_t-26', 
'x_best_beta_1_t-26', 'x_best_omega_t-26', 'x_best_mu_t-26', 'x_best_nu_tgarch_t-26', 'rv_t-25', 'x_best_alpha_1_t-25', 'x_best_beta_1_t-25', 'x_best_omega_t-25', 'x_best_mu_t-25', 'x_best_nu_tgarch_t-25', 'rv_t-24', 'x_best_alpha_1_t-24', 'x_best_beta_1_t-24', 'x_best_omega_t-24', 'x_best_mu_t-24', 'x_best_nu_tgarch_t-24', 'rv_t-23', 'x_best_alpha_1_t-23', 'x_best_beta_1_t-23', 'x_best_omega_t-23', 'x_best_mu_t-23', 'x_best_nu_tgarch_t-23', 'rv_t-22', 'x_best_alpha_1_t-22', 'x_best_beta_1_t-22', 'x_best_omega_t-22', 'x_best_mu_t-22', 'x_best_nu_tgarch_t-22', 'rv_t-21', 'x_best_alpha_1_t-21', 'x_best_beta_1_t-21', 'x_best_omega_t-21', 'x_best_mu_t-21', 'x_best_nu_tgarch_t-21', 'rv_t-20', 'x_best_alpha_1_t-20', 'x_best_beta_1_t-20', 'x_best_omega_t-20', 'x_best_mu_t-20', 'x_best_nu_tgarch_t-20', 'rv_t-19', 'x_best_alpha_1_t-19', 'x_best_beta_1_t-19', 'x_best_omega_t-19', 'x_best_mu_t-19', 'x_best_nu_tgarch_t-19', 'rv_t-18', 'x_best_alpha_1_t-18', 'x_best_beta_1_t-18', 'x_best_omega_t-18', 'x_best_mu_t-18', 'x_best_nu_tgarch_t-18', 'rv_t-17', 'x_best_alpha_1_t-17', 'x_best_beta_1_t-17', 'x_best_omega_t-17', 'x_best_mu_t-17', 'x_best_nu_tgarch_t-17', 'rv_t-16', 'x_best_alpha_1_t-16', 'x_best_beta_1_t-16', 'x_best_omega_t-16', 'x_best_mu_t-16', 'x_best_nu_tgarch_t-16', 'rv_t-15', 'x_best_alpha_1_t-15', 'x_best_beta_1_t-15', 'x_best_omega_t-15', 'x_best_mu_t-15', 'x_best_nu_tgarch_t-15', 'rv_t-14', 'x_best_alpha_1_t-14', 'x_best_beta_1_t-14', 'x_best_omega_t-14', 'x_best_mu_t-14', 'x_best_nu_tgarch_t-14', 'rv_t-13', 'x_best_alpha_1_t-13', 'x_best_beta_1_t-13', 'x_best_omega_t-13', 'x_best_mu_t-13', 'x_best_nu_tgarch_t-13', 'rv_t-12', 'x_best_alpha_1_t-12', 'x_best_beta_1_t-12', 'x_best_omega_t-12', 'x_best_mu_t-12', 'x_best_nu_tgarch_t-12', 'rv_t-11', 'x_best_alpha_1_t-11', 'x_best_beta_1_t-11', 'x_best_omega_t-11', 'x_best_mu_t-11', 'x_best_nu_tgarch_t-11', 'rv_t-10', 'x_best_alpha_1_t-10', 'x_best_beta_1_t-10', 'x_best_omega_t-10', 'x_best_mu_t-10', 
'x_best_nu_tgarch_t-10', 'rv_t-9', 'x_best_alpha_1_t-9', 'x_best_beta_1_t-9', 'x_best_omega_t-9', 'x_best_mu_t-9', 'x_best_nu_tgarch_t-9', 'rv_t-8', 'x_best_alpha_1_t-8', 'x_best_beta_1_t-8', 'x_best_omega_t-8', 'x_best_mu_t-8', 'x_best_nu_tgarch_t-8', 'rv_t-7', 'x_best_alpha_1_t-7', 'x_best_beta_1_t-7', 'x_best_omega_t-7', 'x_best_mu_t-7', 'x_best_nu_tgarch_t-7', 'rv_t-6', 'x_best_alpha_1_t-6', 'x_best_beta_1_t-6', 'x_best_omega_t-6', 'x_best_mu_t-6', 'x_best_nu_tgarch_t-6', 'rv_t-5', 'x_best_alpha_1_t-5', 'x_best_beta_1_t-5', 'x_best_omega_t-5', 'x_best_mu_t-5', 'x_best_nu_tgarch_t-5', 'rv_t-4', 'x_best_alpha_1_t-4', 'x_best_beta_1_t-4', 'x_best_omega_t-4', 'x_best_mu_t-4', 'x_best_nu_tgarch_t-4', 'rv_t-3', 'x_best_alpha_1_t-3', 'x_best_beta_1_t-3', 'x_best_omega_t-3', 'x_best_mu_t-3', 'x_best_nu_tgarch_t-3', 'rv_t-2', 'x_best_alpha_1_t-2', 'x_best_beta_1_t-2', 'x_best_omega_t-2', 'x_best_mu_t-2', 'x_best_nu_tgarch_t-2', 'rv_t-1', 'x_best_alpha_1_t-1', 'x_best_beta_1_t-1', 'x_best_omega_t-1', 'x_best_mu_t-1', 'x_best_nu_tgarch_t-1', 'rv_t-0', 'x_best_alpha_1_t-0', 'x_best_beta_1_t-0', 'x_best_omega_t-0', 'x_best_mu_t-0', 'x_best_nu_tgarch_t-0']
['rv_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 'y_best_nu_tgarch_t+15', 'rv_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 
'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 
'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'y_best_alpha_1_t+51', 
'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']
[[3.31688180e-11 9.97802572e-01]
 [2.13094142e-02 9.63861495e-01]
 [1.84962299e-02 9.70108134e-01]
 [3.04809820e-17 9.98244968e-01]
 [8.17518273e-15 9.98014310e-01]]

Load data v7¶

In [12]:
# Load structured data dictionary v7 from disk and inspect the EURUSD entry.
# NOTE(review): pickle.load executes arbitrary code for untrusted files — this
# pickle is presumably produced by an earlier save cell in this notebook; verify.
load_data_object_7_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_7.pkl"
)

with open(load_data_object_7_file_path, "rb") as f:
    structured_data_dict_7 = pickle.load(f)

print("Data dictionary 7 loaded successfully.")

# Pull the EURUSD arrays and column-name lists out of the nested dict.
# .get() returns None if a key is missing instead of raising.
eur7_entry = structured_data_dict_7["EURUSD"]
eur7_X_price = eur7_entry.get("X_other")
eur7_X_time = eur7_entry.get("X_time")
eur7_y = eur7_entry.get("y")
eur7_y_columns = eur7_entry.get("y_columns")
eur7_X_columns = eur7_entry.get("X_other_columns")

print(eur7_X_columns)
print(eur7_y_columns)

# Sanity-check the feature/target tensors (shape, NaN/Inf counts, per-feature stats).
df_xprice = summarize_features(eur7_X_price, eur7_X_columns, name="X_price")
df_y = summarize_features(eur7_y, eur7_y_columns, name="y")
print(df_xprice.head(20))
print(df_y.head(20))
Data dictionary 7 loaded successfully.
['rv_t-59', 'x_best_alpha_1_t-59', 'x_best_beta_1_t-59', 'x_best_omega_t-59', 'x_best_mu_t-59', 'x_best_nu_tgarch_t-59', 'rv_t-58', 'x_best_alpha_1_t-58', 'x_best_beta_1_t-58', 'x_best_omega_t-58', 'x_best_mu_t-58', 'x_best_nu_tgarch_t-58', 'rv_t-57', 'x_best_alpha_1_t-57', 'x_best_beta_1_t-57', 'x_best_omega_t-57', 'x_best_mu_t-57', 'x_best_nu_tgarch_t-57', 'rv_t-56', 'x_best_alpha_1_t-56', 'x_best_beta_1_t-56', 'x_best_omega_t-56', 'x_best_mu_t-56', 'x_best_nu_tgarch_t-56', 'rv_t-55', 'x_best_alpha_1_t-55', 'x_best_beta_1_t-55', 'x_best_omega_t-55', 'x_best_mu_t-55', 'x_best_nu_tgarch_t-55', 'rv_t-54', 'x_best_alpha_1_t-54', 'x_best_beta_1_t-54', 'x_best_omega_t-54', 'x_best_mu_t-54', 'x_best_nu_tgarch_t-54', 'rv_t-53', 'x_best_alpha_1_t-53', 'x_best_beta_1_t-53', 'x_best_omega_t-53', 'x_best_mu_t-53', 'x_best_nu_tgarch_t-53', 'rv_t-52', 'x_best_alpha_1_t-52', 'x_best_beta_1_t-52', 'x_best_omega_t-52', 'x_best_mu_t-52', 'x_best_nu_tgarch_t-52', 'rv_t-51', 'x_best_alpha_1_t-51', 'x_best_beta_1_t-51', 'x_best_omega_t-51', 'x_best_mu_t-51', 'x_best_nu_tgarch_t-51', 'rv_t-50', 'x_best_alpha_1_t-50', 'x_best_beta_1_t-50', 'x_best_omega_t-50', 'x_best_mu_t-50', 'x_best_nu_tgarch_t-50', 'rv_t-49', 'x_best_alpha_1_t-49', 'x_best_beta_1_t-49', 'x_best_omega_t-49', 'x_best_mu_t-49', 'x_best_nu_tgarch_t-49', 'rv_t-48', 'x_best_alpha_1_t-48', 'x_best_beta_1_t-48', 'x_best_omega_t-48', 'x_best_mu_t-48', 'x_best_nu_tgarch_t-48', 'rv_t-47', 'x_best_alpha_1_t-47', 'x_best_beta_1_t-47', 'x_best_omega_t-47', 'x_best_mu_t-47', 'x_best_nu_tgarch_t-47', 'rv_t-46', 'x_best_alpha_1_t-46', 'x_best_beta_1_t-46', 'x_best_omega_t-46', 'x_best_mu_t-46', 'x_best_nu_tgarch_t-46', 'rv_t-45', 'x_best_alpha_1_t-45', 'x_best_beta_1_t-45', 'x_best_omega_t-45', 'x_best_mu_t-45', 'x_best_nu_tgarch_t-45', 'rv_t-44', 'x_best_alpha_1_t-44', 'x_best_beta_1_t-44', 'x_best_omega_t-44', 'x_best_mu_t-44', 'x_best_nu_tgarch_t-44', 'rv_t-43', 'x_best_alpha_1_t-43', 'x_best_beta_1_t-43', 'x_best_omega_t-43', 
'x_best_mu_t-43', 'x_best_nu_tgarch_t-43', 'rv_t-42', 'x_best_alpha_1_t-42', 'x_best_beta_1_t-42', 'x_best_omega_t-42', 'x_best_mu_t-42', 'x_best_nu_tgarch_t-42', 'rv_t-41', 'x_best_alpha_1_t-41', 'x_best_beta_1_t-41', 'x_best_omega_t-41', 'x_best_mu_t-41', 'x_best_nu_tgarch_t-41', 'rv_t-40', 'x_best_alpha_1_t-40', 'x_best_beta_1_t-40', 'x_best_omega_t-40', 'x_best_mu_t-40', 'x_best_nu_tgarch_t-40', 'rv_t-39', 'x_best_alpha_1_t-39', 'x_best_beta_1_t-39', 'x_best_omega_t-39', 'x_best_mu_t-39', 'x_best_nu_tgarch_t-39', 'rv_t-38', 'x_best_alpha_1_t-38', 'x_best_beta_1_t-38', 'x_best_omega_t-38', 'x_best_mu_t-38', 'x_best_nu_tgarch_t-38', 'rv_t-37', 'x_best_alpha_1_t-37', 'x_best_beta_1_t-37', 'x_best_omega_t-37', 'x_best_mu_t-37', 'x_best_nu_tgarch_t-37', 'rv_t-36', 'x_best_alpha_1_t-36', 'x_best_beta_1_t-36', 'x_best_omega_t-36', 'x_best_mu_t-36', 'x_best_nu_tgarch_t-36', 'rv_t-35', 'x_best_alpha_1_t-35', 'x_best_beta_1_t-35', 'x_best_omega_t-35', 'x_best_mu_t-35', 'x_best_nu_tgarch_t-35', 'rv_t-34', 'x_best_alpha_1_t-34', 'x_best_beta_1_t-34', 'x_best_omega_t-34', 'x_best_mu_t-34', 'x_best_nu_tgarch_t-34', 'rv_t-33', 'x_best_alpha_1_t-33', 'x_best_beta_1_t-33', 'x_best_omega_t-33', 'x_best_mu_t-33', 'x_best_nu_tgarch_t-33', 'rv_t-32', 'x_best_alpha_1_t-32', 'x_best_beta_1_t-32', 'x_best_omega_t-32', 'x_best_mu_t-32', 'x_best_nu_tgarch_t-32', 'rv_t-31', 'x_best_alpha_1_t-31', 'x_best_beta_1_t-31', 'x_best_omega_t-31', 'x_best_mu_t-31', 'x_best_nu_tgarch_t-31', 'rv_t-30', 'x_best_alpha_1_t-30', 'x_best_beta_1_t-30', 'x_best_omega_t-30', 'x_best_mu_t-30', 'x_best_nu_tgarch_t-30', 'rv_t-29', 'x_best_alpha_1_t-29', 'x_best_beta_1_t-29', 'x_best_omega_t-29', 'x_best_mu_t-29', 'x_best_nu_tgarch_t-29', 'rv_t-28', 'x_best_alpha_1_t-28', 'x_best_beta_1_t-28', 'x_best_omega_t-28', 'x_best_mu_t-28', 'x_best_nu_tgarch_t-28', 'rv_t-27', 'x_best_alpha_1_t-27', 'x_best_beta_1_t-27', 'x_best_omega_t-27', 'x_best_mu_t-27', 'x_best_nu_tgarch_t-27', 'rv_t-26', 'x_best_alpha_1_t-26', 
'x_best_beta_1_t-26', 'x_best_omega_t-26', 'x_best_mu_t-26', 'x_best_nu_tgarch_t-26', 'rv_t-25', 'x_best_alpha_1_t-25', 'x_best_beta_1_t-25', 'x_best_omega_t-25', 'x_best_mu_t-25', 'x_best_nu_tgarch_t-25', 'rv_t-24', 'x_best_alpha_1_t-24', 'x_best_beta_1_t-24', 'x_best_omega_t-24', 'x_best_mu_t-24', 'x_best_nu_tgarch_t-24', 'rv_t-23', 'x_best_alpha_1_t-23', 'x_best_beta_1_t-23', 'x_best_omega_t-23', 'x_best_mu_t-23', 'x_best_nu_tgarch_t-23', 'rv_t-22', 'x_best_alpha_1_t-22', 'x_best_beta_1_t-22', 'x_best_omega_t-22', 'x_best_mu_t-22', 'x_best_nu_tgarch_t-22', 'rv_t-21', 'x_best_alpha_1_t-21', 'x_best_beta_1_t-21', 'x_best_omega_t-21', 'x_best_mu_t-21', 'x_best_nu_tgarch_t-21', 'rv_t-20', 'x_best_alpha_1_t-20', 'x_best_beta_1_t-20', 'x_best_omega_t-20', 'x_best_mu_t-20', 'x_best_nu_tgarch_t-20', 'rv_t-19', 'x_best_alpha_1_t-19', 'x_best_beta_1_t-19', 'x_best_omega_t-19', 'x_best_mu_t-19', 'x_best_nu_tgarch_t-19', 'rv_t-18', 'x_best_alpha_1_t-18', 'x_best_beta_1_t-18', 'x_best_omega_t-18', 'x_best_mu_t-18', 'x_best_nu_tgarch_t-18', 'rv_t-17', 'x_best_alpha_1_t-17', 'x_best_beta_1_t-17', 'x_best_omega_t-17', 'x_best_mu_t-17', 'x_best_nu_tgarch_t-17', 'rv_t-16', 'x_best_alpha_1_t-16', 'x_best_beta_1_t-16', 'x_best_omega_t-16', 'x_best_mu_t-16', 'x_best_nu_tgarch_t-16', 'rv_t-15', 'x_best_alpha_1_t-15', 'x_best_beta_1_t-15', 'x_best_omega_t-15', 'x_best_mu_t-15', 'x_best_nu_tgarch_t-15', 'rv_t-14', 'x_best_alpha_1_t-14', 'x_best_beta_1_t-14', 'x_best_omega_t-14', 'x_best_mu_t-14', 'x_best_nu_tgarch_t-14', 'rv_t-13', 'x_best_alpha_1_t-13', 'x_best_beta_1_t-13', 'x_best_omega_t-13', 'x_best_mu_t-13', 'x_best_nu_tgarch_t-13', 'rv_t-12', 'x_best_alpha_1_t-12', 'x_best_beta_1_t-12', 'x_best_omega_t-12', 'x_best_mu_t-12', 'x_best_nu_tgarch_t-12', 'rv_t-11', 'x_best_alpha_1_t-11', 'x_best_beta_1_t-11', 'x_best_omega_t-11', 'x_best_mu_t-11', 'x_best_nu_tgarch_t-11', 'rv_t-10', 'x_best_alpha_1_t-10', 'x_best_beta_1_t-10', 'x_best_omega_t-10', 'x_best_mu_t-10', 
'x_best_nu_tgarch_t-10', 'rv_t-9', 'x_best_alpha_1_t-9', 'x_best_beta_1_t-9', 'x_best_omega_t-9', 'x_best_mu_t-9', 'x_best_nu_tgarch_t-9', 'rv_t-8', 'x_best_alpha_1_t-8', 'x_best_beta_1_t-8', 'x_best_omega_t-8', 'x_best_mu_t-8', 'x_best_nu_tgarch_t-8', 'rv_t-7', 'x_best_alpha_1_t-7', 'x_best_beta_1_t-7', 'x_best_omega_t-7', 'x_best_mu_t-7', 'x_best_nu_tgarch_t-7', 'rv_t-6', 'x_best_alpha_1_t-6', 'x_best_beta_1_t-6', 'x_best_omega_t-6', 'x_best_mu_t-6', 'x_best_nu_tgarch_t-6', 'rv_t-5', 'x_best_alpha_1_t-5', 'x_best_beta_1_t-5', 'x_best_omega_t-5', 'x_best_mu_t-5', 'x_best_nu_tgarch_t-5', 'rv_t-4', 'x_best_alpha_1_t-4', 'x_best_beta_1_t-4', 'x_best_omega_t-4', 'x_best_mu_t-4', 'x_best_nu_tgarch_t-4', 'rv_t-3', 'x_best_alpha_1_t-3', 'x_best_beta_1_t-3', 'x_best_omega_t-3', 'x_best_mu_t-3', 'x_best_nu_tgarch_t-3', 'rv_t-2', 'x_best_alpha_1_t-2', 'x_best_beta_1_t-2', 'x_best_omega_t-2', 'x_best_mu_t-2', 'x_best_nu_tgarch_t-2', 'rv_t-1', 'x_best_alpha_1_t-1', 'x_best_beta_1_t-1', 'x_best_omega_t-1', 'x_best_mu_t-1', 'x_best_nu_tgarch_t-1', 'rv_t-0', 'x_best_alpha_1_t-0', 'x_best_beta_1_t-0', 'x_best_omega_t-0', 'x_best_mu_t-0', 'x_best_nu_tgarch_t-0']
['rv_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 'y_best_nu_tgarch_t+15', 'rv_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 
'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 
'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'y_best_alpha_1_t+51', 
'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']
[X_price] shape=(3782, 60, 6) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
[y] shape=(3782, 60, 6) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
  feature       min           p01    median         p99         max      mean  \
0      f5  0.000000  3.031849e+00  5.034044  106.962327  345.033304  7.723787   
1      f0  0.000096  2.542676e-03  0.151064    1.340520    5.997252  0.220419   
2      f2  0.000000  0.000000e+00  0.954145    1.000000    1.000000  0.817385   
3      f3  0.000000  9.629686e-10  0.003832    0.219574    0.424595  0.028887   
4      f1  0.000000  0.000000e+00  0.006112    0.136720    0.205071  0.024899   
5      f4 -0.071164 -6.033247e-02 -0.002668    0.037148    0.043256 -0.003317   

         std  
0  18.160206  
1   0.287035  
2   0.315141  
3   0.052974  
4   0.034081  
5   0.020569  
  feature           min           p01    median         p99         max  \
0      f5  2.400685e+00  3.057405e+00  4.889405  108.026063  339.032393   
1      f0  9.561894e-05  2.542676e-03  0.151394    1.342547    5.997252   
2      f2  0.000000e+00  0.000000e+00  0.955367    1.000000    1.000000   
3      f3  7.501062e-10  9.604417e-10  0.003576    0.219686    0.424926   
4      f1  0.000000e+00  0.000000e+00  0.001614    0.137276    0.202748   
5      f4 -7.201752e-02 -6.041314e-02 -0.002971    0.037185    0.043658   

       mean        std  
0  7.677354  17.956992  
1  0.221915   0.290001  
2  0.820012   0.313103  
3  0.028502   0.052676  
4  0.024652   0.034231  
5 -0.003463   0.020489  

Load data v8¶

In [13]:
# Load structured data dictionary v8 from disk and inspect the EURUSD entry.
# NOTE(review): pickle.load executes arbitrary code for untrusted files — this
# pickle is presumably produced by an earlier save cell in this notebook; verify.
load_data_object_8_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_8.pkl"
)

with open(load_data_object_8_file_path, "rb") as f:
    structured_data_dict_8 = pickle.load(f)

print("Data dictionary 8 loaded successfully.")

# Pull the EURUSD arrays and column-name lists out of the nested dict.
# .get() returns None if a key is missing instead of raising.
eur8_entry = structured_data_dict_8["EURUSD"]
eur8_X_price = eur8_entry.get("X_other")
eur8_X_time = eur8_entry.get("X_time")
eur8_y = eur8_entry.get("y")
eur8_y_columns = eur8_entry.get("y_columns")
eur8_X_columns = eur8_entry.get("X_other_columns")

print(eur8_X_columns)
print(eur8_y_columns)

# Sanity-check the feature/target tensors (shape, NaN/Inf counts, per-feature stats).
df_xprice = summarize_features(eur8_X_price, eur8_X_columns, name="X_price")
df_y = summarize_features(eur8_y, eur8_y_columns, name="y")
print(df_xprice.head(20))
print(df_y.head(20))
Data dictionary 8 loaded successfully.
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 'y_best_nu_tgarch_t+15', 'rv_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 
'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 
'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'y_best_alpha_1_t+51', 
'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']
[X_price] shape=(3782, 60, 1) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
[y] shape=(3782, 60, 6) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
  feature       min       p01    median      p99       max      mean       std
0      f0  0.000096  0.002543  0.151064  1.34052  5.997252  0.220419  0.287035
  feature           min           p01    median         p99         max  \
0      f5  2.400685e+00  3.057405e+00  4.889405  108.026063  339.032393   
1      f0  9.561894e-05  2.542676e-03  0.151394    1.342547    5.997252   
2      f2  0.000000e+00  0.000000e+00  0.955367    1.000000    1.000000   
3      f3  7.501062e-10  9.604417e-10  0.003576    0.219686    0.424926   
4      f1  0.000000e+00  0.000000e+00  0.001614    0.137276    0.202748   
5      f4 -7.201752e-02 -6.041314e-02 -0.002971    0.037185    0.043658   

       mean        std  
0  7.677354  17.956992  
1  0.221915   0.290001  
2  0.820012   0.313103  
3  0.028502   0.052676  
4  0.024652   0.034231  
5 -0.003463   0.020489  

Apple v8¶

In [14]:
# Pull the v8 AAPL arrays out of the structured data dictionary.
# NOTE(review): variable prefix "appl8" (vs. "aapl10" later) is kept as-is
# because downstream cells reference these exact names.
aapl8_entry = structured_data_dict_8["AAPL"]

appl8_X_price = aapl8_entry.get("X_other")
appl8_X_time = aapl8_entry.get("X_time")
appl8_y = aapl8_entry.get("y")
appl8_y_columns = aapl8_entry.get("y_columns")
appl8_X_columns = aapl8_entry.get("X_other_columns")

# Show the feature/target column layouts and the first target series.
print(appl8_X_columns)
print(appl8_y_columns)
print(appl8_y[:, 0, 0])
['rv_t-59', 'rv_t-58', 'rv_t-57', 'rv_t-56', 'rv_t-55', 'rv_t-54', 'rv_t-53', 'rv_t-52', 'rv_t-51', 'rv_t-50', 'rv_t-49', 'rv_t-48', 'rv_t-47', 'rv_t-46', 'rv_t-45', 'rv_t-44', 'rv_t-43', 'rv_t-42', 'rv_t-41', 'rv_t-40', 'rv_t-39', 'rv_t-38', 'rv_t-37', 'rv_t-36', 'rv_t-35', 'rv_t-34', 'rv_t-33', 'rv_t-32', 'rv_t-31', 'rv_t-30', 'rv_t-29', 'rv_t-28', 'rv_t-27', 'rv_t-26', 'rv_t-25', 'rv_t-24', 'rv_t-23', 'rv_t-22', 'rv_t-21', 'rv_t-20', 'rv_t-19', 'rv_t-18', 'rv_t-17', 'rv_t-16', 'rv_t-15', 'rv_t-14', 'rv_t-13', 'rv_t-12', 'rv_t-11', 'rv_t-10', 'rv_t-9', 'rv_t-8', 'rv_t-7', 'rv_t-6', 'rv_t-5', 'rv_t-4', 'rv_t-3', 'rv_t-2', 'rv_t-1', 'rv_t-0']
['rv_t+1', 'y_best_alpha_1_t+1', 'y_best_beta_1_t+1', 'y_best_omega_t+1', 'y_best_mu_t+1', 'y_best_nu_tgarch_t+1', 'rv_t+2', 'y_best_alpha_1_t+2', 'y_best_beta_1_t+2', 'y_best_omega_t+2', 'y_best_mu_t+2', 'y_best_nu_tgarch_t+2', 'rv_t+3', 'y_best_alpha_1_t+3', 'y_best_beta_1_t+3', 'y_best_omega_t+3', 'y_best_mu_t+3', 'y_best_nu_tgarch_t+3', 'rv_t+4', 'y_best_alpha_1_t+4', 'y_best_beta_1_t+4', 'y_best_omega_t+4', 'y_best_mu_t+4', 'y_best_nu_tgarch_t+4', 'rv_t+5', 'y_best_alpha_1_t+5', 'y_best_beta_1_t+5', 'y_best_omega_t+5', 'y_best_mu_t+5', 'y_best_nu_tgarch_t+5', 'rv_t+6', 'y_best_alpha_1_t+6', 'y_best_beta_1_t+6', 'y_best_omega_t+6', 'y_best_mu_t+6', 'y_best_nu_tgarch_t+6', 'rv_t+7', 'y_best_alpha_1_t+7', 'y_best_beta_1_t+7', 'y_best_omega_t+7', 'y_best_mu_t+7', 'y_best_nu_tgarch_t+7', 'rv_t+8', 'y_best_alpha_1_t+8', 'y_best_beta_1_t+8', 'y_best_omega_t+8', 'y_best_mu_t+8', 'y_best_nu_tgarch_t+8', 'rv_t+9', 'y_best_alpha_1_t+9', 'y_best_beta_1_t+9', 'y_best_omega_t+9', 'y_best_mu_t+9', 'y_best_nu_tgarch_t+9', 'rv_t+10', 'y_best_alpha_1_t+10', 'y_best_beta_1_t+10', 'y_best_omega_t+10', 'y_best_mu_t+10', 'y_best_nu_tgarch_t+10', 'rv_t+11', 'y_best_alpha_1_t+11', 'y_best_beta_1_t+11', 'y_best_omega_t+11', 'y_best_mu_t+11', 'y_best_nu_tgarch_t+11', 'rv_t+12', 'y_best_alpha_1_t+12', 'y_best_beta_1_t+12', 'y_best_omega_t+12', 'y_best_mu_t+12', 'y_best_nu_tgarch_t+12', 'rv_t+13', 'y_best_alpha_1_t+13', 'y_best_beta_1_t+13', 'y_best_omega_t+13', 'y_best_mu_t+13', 'y_best_nu_tgarch_t+13', 'rv_t+14', 'y_best_alpha_1_t+14', 'y_best_beta_1_t+14', 'y_best_omega_t+14', 'y_best_mu_t+14', 'y_best_nu_tgarch_t+14', 'rv_t+15', 'y_best_alpha_1_t+15', 'y_best_beta_1_t+15', 'y_best_omega_t+15', 'y_best_mu_t+15', 'y_best_nu_tgarch_t+15', 'rv_t+16', 'y_best_alpha_1_t+16', 'y_best_beta_1_t+16', 'y_best_omega_t+16', 'y_best_mu_t+16', 'y_best_nu_tgarch_t+16', 'rv_t+17', 'y_best_alpha_1_t+17', 'y_best_beta_1_t+17', 'y_best_omega_t+17', 'y_best_mu_t+17', 'y_best_nu_tgarch_t+17', 'rv_t+18', 
'y_best_alpha_1_t+18', 'y_best_beta_1_t+18', 'y_best_omega_t+18', 'y_best_mu_t+18', 'y_best_nu_tgarch_t+18', 'rv_t+19', 'y_best_alpha_1_t+19', 'y_best_beta_1_t+19', 'y_best_omega_t+19', 'y_best_mu_t+19', 'y_best_nu_tgarch_t+19', 'rv_t+20', 'y_best_alpha_1_t+20', 'y_best_beta_1_t+20', 'y_best_omega_t+20', 'y_best_mu_t+20', 'y_best_nu_tgarch_t+20', 'rv_t+21', 'y_best_alpha_1_t+21', 'y_best_beta_1_t+21', 'y_best_omega_t+21', 'y_best_mu_t+21', 'y_best_nu_tgarch_t+21', 'rv_t+22', 'y_best_alpha_1_t+22', 'y_best_beta_1_t+22', 'y_best_omega_t+22', 'y_best_mu_t+22', 'y_best_nu_tgarch_t+22', 'rv_t+23', 'y_best_alpha_1_t+23', 'y_best_beta_1_t+23', 'y_best_omega_t+23', 'y_best_mu_t+23', 'y_best_nu_tgarch_t+23', 'rv_t+24', 'y_best_alpha_1_t+24', 'y_best_beta_1_t+24', 'y_best_omega_t+24', 'y_best_mu_t+24', 'y_best_nu_tgarch_t+24', 'rv_t+25', 'y_best_alpha_1_t+25', 'y_best_beta_1_t+25', 'y_best_omega_t+25', 'y_best_mu_t+25', 'y_best_nu_tgarch_t+25', 'rv_t+26', 'y_best_alpha_1_t+26', 'y_best_beta_1_t+26', 'y_best_omega_t+26', 'y_best_mu_t+26', 'y_best_nu_tgarch_t+26', 'rv_t+27', 'y_best_alpha_1_t+27', 'y_best_beta_1_t+27', 'y_best_omega_t+27', 'y_best_mu_t+27', 'y_best_nu_tgarch_t+27', 'rv_t+28', 'y_best_alpha_1_t+28', 'y_best_beta_1_t+28', 'y_best_omega_t+28', 'y_best_mu_t+28', 'y_best_nu_tgarch_t+28', 'rv_t+29', 'y_best_alpha_1_t+29', 'y_best_beta_1_t+29', 'y_best_omega_t+29', 'y_best_mu_t+29', 'y_best_nu_tgarch_t+29', 'rv_t+30', 'y_best_alpha_1_t+30', 'y_best_beta_1_t+30', 'y_best_omega_t+30', 'y_best_mu_t+30', 'y_best_nu_tgarch_t+30', 'rv_t+31', 'y_best_alpha_1_t+31', 'y_best_beta_1_t+31', 'y_best_omega_t+31', 'y_best_mu_t+31', 'y_best_nu_tgarch_t+31', 'rv_t+32', 'y_best_alpha_1_t+32', 'y_best_beta_1_t+32', 'y_best_omega_t+32', 'y_best_mu_t+32', 'y_best_nu_tgarch_t+32', 'rv_t+33', 'y_best_alpha_1_t+33', 'y_best_beta_1_t+33', 'y_best_omega_t+33', 'y_best_mu_t+33', 'y_best_nu_tgarch_t+33', 'rv_t+34', 'y_best_alpha_1_t+34', 'y_best_beta_1_t+34', 'y_best_omega_t+34', 
'y_best_mu_t+34', 'y_best_nu_tgarch_t+34', 'rv_t+35', 'y_best_alpha_1_t+35', 'y_best_beta_1_t+35', 'y_best_omega_t+35', 'y_best_mu_t+35', 'y_best_nu_tgarch_t+35', 'rv_t+36', 'y_best_alpha_1_t+36', 'y_best_beta_1_t+36', 'y_best_omega_t+36', 'y_best_mu_t+36', 'y_best_nu_tgarch_t+36', 'rv_t+37', 'y_best_alpha_1_t+37', 'y_best_beta_1_t+37', 'y_best_omega_t+37', 'y_best_mu_t+37', 'y_best_nu_tgarch_t+37', 'rv_t+38', 'y_best_alpha_1_t+38', 'y_best_beta_1_t+38', 'y_best_omega_t+38', 'y_best_mu_t+38', 'y_best_nu_tgarch_t+38', 'rv_t+39', 'y_best_alpha_1_t+39', 'y_best_beta_1_t+39', 'y_best_omega_t+39', 'y_best_mu_t+39', 'y_best_nu_tgarch_t+39', 'rv_t+40', 'y_best_alpha_1_t+40', 'y_best_beta_1_t+40', 'y_best_omega_t+40', 'y_best_mu_t+40', 'y_best_nu_tgarch_t+40', 'rv_t+41', 'y_best_alpha_1_t+41', 'y_best_beta_1_t+41', 'y_best_omega_t+41', 'y_best_mu_t+41', 'y_best_nu_tgarch_t+41', 'rv_t+42', 'y_best_alpha_1_t+42', 'y_best_beta_1_t+42', 'y_best_omega_t+42', 'y_best_mu_t+42', 'y_best_nu_tgarch_t+42', 'rv_t+43', 'y_best_alpha_1_t+43', 'y_best_beta_1_t+43', 'y_best_omega_t+43', 'y_best_mu_t+43', 'y_best_nu_tgarch_t+43', 'rv_t+44', 'y_best_alpha_1_t+44', 'y_best_beta_1_t+44', 'y_best_omega_t+44', 'y_best_mu_t+44', 'y_best_nu_tgarch_t+44', 'rv_t+45', 'y_best_alpha_1_t+45', 'y_best_beta_1_t+45', 'y_best_omega_t+45', 'y_best_mu_t+45', 'y_best_nu_tgarch_t+45', 'rv_t+46', 'y_best_alpha_1_t+46', 'y_best_beta_1_t+46', 'y_best_omega_t+46', 'y_best_mu_t+46', 'y_best_nu_tgarch_t+46', 'rv_t+47', 'y_best_alpha_1_t+47', 'y_best_beta_1_t+47', 'y_best_omega_t+47', 'y_best_mu_t+47', 'y_best_nu_tgarch_t+47', 'rv_t+48', 'y_best_alpha_1_t+48', 'y_best_beta_1_t+48', 'y_best_omega_t+48', 'y_best_mu_t+48', 'y_best_nu_tgarch_t+48', 'rv_t+49', 'y_best_alpha_1_t+49', 'y_best_beta_1_t+49', 'y_best_omega_t+49', 'y_best_mu_t+49', 'y_best_nu_tgarch_t+49', 'rv_t+50', 'y_best_alpha_1_t+50', 'y_best_beta_1_t+50', 'y_best_omega_t+50', 'y_best_mu_t+50', 'y_best_nu_tgarch_t+50', 'rv_t+51', 'y_best_alpha_1_t+51', 
'y_best_beta_1_t+51', 'y_best_omega_t+51', 'y_best_mu_t+51', 'y_best_nu_tgarch_t+51', 'rv_t+52', 'y_best_alpha_1_t+52', 'y_best_beta_1_t+52', 'y_best_omega_t+52', 'y_best_mu_t+52', 'y_best_nu_tgarch_t+52', 'rv_t+53', 'y_best_alpha_1_t+53', 'y_best_beta_1_t+53', 'y_best_omega_t+53', 'y_best_mu_t+53', 'y_best_nu_tgarch_t+53', 'rv_t+54', 'y_best_alpha_1_t+54', 'y_best_beta_1_t+54', 'y_best_omega_t+54', 'y_best_mu_t+54', 'y_best_nu_tgarch_t+54', 'rv_t+55', 'y_best_alpha_1_t+55', 'y_best_beta_1_t+55', 'y_best_omega_t+55', 'y_best_mu_t+55', 'y_best_nu_tgarch_t+55', 'rv_t+56', 'y_best_alpha_1_t+56', 'y_best_beta_1_t+56', 'y_best_omega_t+56', 'y_best_mu_t+56', 'y_best_nu_tgarch_t+56', 'rv_t+57', 'y_best_alpha_1_t+57', 'y_best_beta_1_t+57', 'y_best_omega_t+57', 'y_best_mu_t+57', 'y_best_nu_tgarch_t+57', 'rv_t+58', 'y_best_alpha_1_t+58', 'y_best_beta_1_t+58', 'y_best_omega_t+58', 'y_best_mu_t+58', 'y_best_nu_tgarch_t+58', 'rv_t+59', 'y_best_alpha_1_t+59', 'y_best_beta_1_t+59', 'y_best_omega_t+59', 'y_best_mu_t+59', 'y_best_nu_tgarch_t+59', 'rv_t+60', 'y_best_alpha_1_t+60', 'y_best_beta_1_t+60', 'y_best_omega_t+60', 'y_best_mu_t+60', 'y_best_nu_tgarch_t+60']
[  0.44148683   1.36512504   0.86625199 ... 112.40750435  30.95861336
  16.24492026]

Load data v9¶

In [15]:
# Load the v9 structured dataset from disk.
load_data_object_9_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_9.pkl"
)
with open(load_data_object_9_file_path, "rb") as f:
    structured_data_dict_9 = pickle.load(f)
print("Data dictionary 9 loaded successfully.")

# Unpack the EURUSD arrays and their column layouts.
eur9_entry = structured_data_dict_9["EURUSD"]
eur9_X_price = eur9_entry.get("X_other")
eur9_X_time = eur9_entry.get("X_time")
eur9_y = eur9_entry.get("y")
eur9_y_columns = eur9_entry.get("y_columns")
eur9_X_columns = eur9_entry.get("X_other_columns")

print(eur9_X_columns)
print(eur9_y_columns)

# Per-feature summary stats plus NaN/Inf sanity checks.
df_xprice = summarize_features(eur9_X_price, eur9_X_columns, name="X_price")
df_y = summarize_features(eur9_y, eur9_y_columns, name="y")
print(df_xprice.head(20))
print(df_y.head(20))
Data dictionary 9 loaded successfully.
['rv_t-59', 'x_best_pred_var_t-59', 'rv_t-58', 'x_best_pred_var_t-58', 'rv_t-57', 'x_best_pred_var_t-57', 'rv_t-56', 'x_best_pred_var_t-56', 'rv_t-55', 'x_best_pred_var_t-55', 'rv_t-54', 'x_best_pred_var_t-54', 'rv_t-53', 'x_best_pred_var_t-53', 'rv_t-52', 'x_best_pred_var_t-52', 'rv_t-51', 'x_best_pred_var_t-51', 'rv_t-50', 'x_best_pred_var_t-50', 'rv_t-49', 'x_best_pred_var_t-49', 'rv_t-48', 'x_best_pred_var_t-48', 'rv_t-47', 'x_best_pred_var_t-47', 'rv_t-46', 'x_best_pred_var_t-46', 'rv_t-45', 'x_best_pred_var_t-45', 'rv_t-44', 'x_best_pred_var_t-44', 'rv_t-43', 'x_best_pred_var_t-43', 'rv_t-42', 'x_best_pred_var_t-42', 'rv_t-41', 'x_best_pred_var_t-41', 'rv_t-40', 'x_best_pred_var_t-40', 'rv_t-39', 'x_best_pred_var_t-39', 'rv_t-38', 'x_best_pred_var_t-38', 'rv_t-37', 'x_best_pred_var_t-37', 'rv_t-36', 'x_best_pred_var_t-36', 'rv_t-35', 'x_best_pred_var_t-35', 'rv_t-34', 'x_best_pred_var_t-34', 'rv_t-33', 'x_best_pred_var_t-33', 'rv_t-32', 'x_best_pred_var_t-32', 'rv_t-31', 'x_best_pred_var_t-31', 'rv_t-30', 'x_best_pred_var_t-30', 'rv_t-29', 'x_best_pred_var_t-29', 'rv_t-28', 'x_best_pred_var_t-28', 'rv_t-27', 'x_best_pred_var_t-27', 'rv_t-26', 'x_best_pred_var_t-26', 'rv_t-25', 'x_best_pred_var_t-25', 'rv_t-24', 'x_best_pred_var_t-24', 'rv_t-23', 'x_best_pred_var_t-23', 'rv_t-22', 'x_best_pred_var_t-22', 'rv_t-21', 'x_best_pred_var_t-21', 'rv_t-20', 'x_best_pred_var_t-20', 'rv_t-19', 'x_best_pred_var_t-19', 'rv_t-18', 'x_best_pred_var_t-18', 'rv_t-17', 'x_best_pred_var_t-17', 'rv_t-16', 'x_best_pred_var_t-16', 'rv_t-15', 'x_best_pred_var_t-15', 'rv_t-14', 'x_best_pred_var_t-14', 'rv_t-13', 'x_best_pred_var_t-13', 'rv_t-12', 'x_best_pred_var_t-12', 'rv_t-11', 'x_best_pred_var_t-11', 'rv_t-10', 'x_best_pred_var_t-10', 'rv_t-9', 'x_best_pred_var_t-9', 'rv_t-8', 'x_best_pred_var_t-8', 'rv_t-7', 'x_best_pred_var_t-7', 'rv_t-6', 'x_best_pred_var_t-6', 'rv_t-5', 'x_best_pred_var_t-5', 'rv_t-4', 'x_best_pred_var_t-4', 'rv_t-3', 'x_best_pred_var_t-3', 'rv_t-2', 
'x_best_pred_var_t-2', 'rv_t-1', 'x_best_pred_var_t-1', 'rv_t-0', 'x_best_pred_var_t-0']
['rv_t+1', 'y_best_pred_var_t+1', 'rv_t+2', 'y_best_pred_var_t+2', 'rv_t+3', 'y_best_pred_var_t+3', 'rv_t+4', 'y_best_pred_var_t+4', 'rv_t+5', 'y_best_pred_var_t+5', 'rv_t+6', 'y_best_pred_var_t+6', 'rv_t+7', 'y_best_pred_var_t+7', 'rv_t+8', 'y_best_pred_var_t+8', 'rv_t+9', 'y_best_pred_var_t+9', 'rv_t+10', 'y_best_pred_var_t+10', 'rv_t+11', 'y_best_pred_var_t+11', 'rv_t+12', 'y_best_pred_var_t+12', 'rv_t+13', 'y_best_pred_var_t+13', 'rv_t+14', 'y_best_pred_var_t+14', 'rv_t+15', 'y_best_pred_var_t+15', 'rv_t+16', 'y_best_pred_var_t+16', 'rv_t+17', 'y_best_pred_var_t+17', 'rv_t+18', 'y_best_pred_var_t+18', 'rv_t+19', 'y_best_pred_var_t+19', 'rv_t+20', 'y_best_pred_var_t+20', 'rv_t+21', 'y_best_pred_var_t+21', 'rv_t+22', 'y_best_pred_var_t+22', 'rv_t+23', 'y_best_pred_var_t+23', 'rv_t+24', 'y_best_pred_var_t+24', 'rv_t+25', 'y_best_pred_var_t+25', 'rv_t+26', 'y_best_pred_var_t+26', 'rv_t+27', 'y_best_pred_var_t+27', 'rv_t+28', 'y_best_pred_var_t+28', 'rv_t+29', 'y_best_pred_var_t+29', 'rv_t+30', 'y_best_pred_var_t+30', 'rv_t+31', 'y_best_pred_var_t+31', 'rv_t+32', 'y_best_pred_var_t+32', 'rv_t+33', 'y_best_pred_var_t+33', 'rv_t+34', 'y_best_pred_var_t+34', 'rv_t+35', 'y_best_pred_var_t+35', 'rv_t+36', 'y_best_pred_var_t+36', 'rv_t+37', 'y_best_pred_var_t+37', 'rv_t+38', 'y_best_pred_var_t+38', 'rv_t+39', 'y_best_pred_var_t+39', 'rv_t+40', 'y_best_pred_var_t+40', 'rv_t+41', 'y_best_pred_var_t+41', 'rv_t+42', 'y_best_pred_var_t+42', 'rv_t+43', 'y_best_pred_var_t+43', 'rv_t+44', 'y_best_pred_var_t+44', 'rv_t+45', 'y_best_pred_var_t+45', 'rv_t+46', 'y_best_pred_var_t+46', 'rv_t+47', 'y_best_pred_var_t+47', 'rv_t+48', 'y_best_pred_var_t+48', 'rv_t+49', 'y_best_pred_var_t+49', 'rv_t+50', 'y_best_pred_var_t+50', 'rv_t+51', 'y_best_pred_var_t+51', 'rv_t+52', 'y_best_pred_var_t+52', 'rv_t+53', 'y_best_pred_var_t+53', 'rv_t+54', 'y_best_pred_var_t+54', 'rv_t+55', 'y_best_pred_var_t+55', 'rv_t+56', 'y_best_pred_var_t+56', 'rv_t+57', 'y_best_pred_var_t+57', 'rv_t+58', 
'y_best_pred_var_t+58', 'rv_t+59', 'y_best_pred_var_t+59', 'rv_t+60', 'y_best_pred_var_t+60']
[X_price] shape=(3782, 60, 2) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
[y] shape=(3782, 60, 2) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
  feature       min       p01    median       p99        max      mean  \
0      f1 -3.193956 -3.013444 -1.744387 -0.294217  23.025851 -1.694000   
1      f0  0.000096  0.002543  0.151064  1.340520   5.997252  0.220419   

        std  
0  0.709077  
1  0.287035  
  feature       min       p01    median       p99       max      mean  \
0      f0  0.000096  0.002543  0.151394  1.342547  5.997252  0.221915   
1      f1  0.041813  0.049371  0.174895  0.733523  1.309894  0.219821   

        std  
0  0.290001  
1  0.145982  

Load data v10¶

In [16]:
# Load the v10 structured dataset from disk.
load_data_object_10_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_10.pkl"
)
with open(load_data_object_10_file_path, "rb") as f:
    structured_data_dict_10 = pickle.load(f)
print("Data dictionary 10 loaded successfully.")

# Unpack the EURUSD arrays and their column layouts.
eur10_entry = structured_data_dict_10["EURUSD"]
eur10_X_price = eur10_entry.get("X_other")
eur10_X_time = eur10_entry.get("X_time")
eur10_y = eur10_entry.get("y")
eur10_y_columns = eur10_entry.get("y_columns")
eur10_X_columns = eur10_entry.get("X_other_columns")

print(eur10_X_columns)
print(eur10_y_columns)
print(eur10_y.shape)

# Per-feature summary stats plus NaN/Inf sanity checks.
df_xprice = summarize_features(eur10_X_price, eur10_X_columns, name="X_price")
df_y = summarize_features(eur10_y, eur10_y_columns, name="y")
print(df_xprice.head(20))
print(df_y.head(20))
Data dictionary 10 loaded successfully.
['rv_t-59', 'lg_return_t-59', 'rv_t-58', 'lg_return_t-58', 'rv_t-57', 'lg_return_t-57', 'rv_t-56', 'lg_return_t-56', 'rv_t-55', 'lg_return_t-55', 'rv_t-54', 'lg_return_t-54', 'rv_t-53', 'lg_return_t-53', 'rv_t-52', 'lg_return_t-52', 'rv_t-51', 'lg_return_t-51', 'rv_t-50', 'lg_return_t-50', 'rv_t-49', 'lg_return_t-49', 'rv_t-48', 'lg_return_t-48', 'rv_t-47', 'lg_return_t-47', 'rv_t-46', 'lg_return_t-46', 'rv_t-45', 'lg_return_t-45', 'rv_t-44', 'lg_return_t-44', 'rv_t-43', 'lg_return_t-43', 'rv_t-42', 'lg_return_t-42', 'rv_t-41', 'lg_return_t-41', 'rv_t-40', 'lg_return_t-40', 'rv_t-39', 'lg_return_t-39', 'rv_t-38', 'lg_return_t-38', 'rv_t-37', 'lg_return_t-37', 'rv_t-36', 'lg_return_t-36', 'rv_t-35', 'lg_return_t-35', 'rv_t-34', 'lg_return_t-34', 'rv_t-33', 'lg_return_t-33', 'rv_t-32', 'lg_return_t-32', 'rv_t-31', 'lg_return_t-31', 'rv_t-30', 'lg_return_t-30', 'rv_t-29', 'lg_return_t-29', 'rv_t-28', 'lg_return_t-28', 'rv_t-27', 'lg_return_t-27', 'rv_t-26', 'lg_return_t-26', 'rv_t-25', 'lg_return_t-25', 'rv_t-24', 'lg_return_t-24', 'rv_t-23', 'lg_return_t-23', 'rv_t-22', 'lg_return_t-22', 'rv_t-21', 'lg_return_t-21', 'rv_t-20', 'lg_return_t-20', 'rv_t-19', 'lg_return_t-19', 'rv_t-18', 'lg_return_t-18', 'rv_t-17', 'lg_return_t-17', 'rv_t-16', 'lg_return_t-16', 'rv_t-15', 'lg_return_t-15', 'rv_t-14', 'lg_return_t-14', 'rv_t-13', 'lg_return_t-13', 'rv_t-12', 'lg_return_t-12', 'rv_t-11', 'lg_return_t-11', 'rv_t-10', 'lg_return_t-10', 'rv_t-9', 'lg_return_t-9', 'rv_t-8', 'lg_return_t-8', 'rv_t-7', 'lg_return_t-7', 'rv_t-6', 'lg_return_t-6', 'rv_t-5', 'lg_return_t-5', 'rv_t-4', 'lg_return_t-4', 'rv_t-3', 'lg_return_t-3', 'rv_t-2', 'lg_return_t-2', 'rv_t-1', 'lg_return_t-1', 'rv_t-0', 'lg_return_t-0']
['rv_t+1', 'lg_return_t+1', 'rv_t+2', 'lg_return_t+2', 'rv_t+3', 'lg_return_t+3', 'rv_t+4', 'lg_return_t+4', 'rv_t+5', 'lg_return_t+5', 'rv_t+6', 'lg_return_t+6', 'rv_t+7', 'lg_return_t+7', 'rv_t+8', 'lg_return_t+8', 'rv_t+9', 'lg_return_t+9', 'rv_t+10', 'lg_return_t+10', 'rv_t+11', 'lg_return_t+11', 'rv_t+12', 'lg_return_t+12', 'rv_t+13', 'lg_return_t+13', 'rv_t+14', 'lg_return_t+14', 'rv_t+15', 'lg_return_t+15', 'rv_t+16', 'lg_return_t+16', 'rv_t+17', 'lg_return_t+17', 'rv_t+18', 'lg_return_t+18', 'rv_t+19', 'lg_return_t+19', 'rv_t+20', 'lg_return_t+20', 'rv_t+21', 'lg_return_t+21', 'rv_t+22', 'lg_return_t+22', 'rv_t+23', 'lg_return_t+23', 'rv_t+24', 'lg_return_t+24', 'rv_t+25', 'lg_return_t+25', 'rv_t+26', 'lg_return_t+26', 'rv_t+27', 'lg_return_t+27', 'rv_t+28', 'lg_return_t+28', 'rv_t+29', 'lg_return_t+29', 'rv_t+30', 'lg_return_t+30', 'rv_t+31', 'lg_return_t+31', 'rv_t+32', 'lg_return_t+32', 'rv_t+33', 'lg_return_t+33', 'rv_t+34', 'lg_return_t+34', 'rv_t+35', 'lg_return_t+35', 'rv_t+36', 'lg_return_t+36', 'rv_t+37', 'lg_return_t+37', 'rv_t+38', 'lg_return_t+38', 'rv_t+39', 'lg_return_t+39', 'rv_t+40', 'lg_return_t+40', 'rv_t+41', 'lg_return_t+41', 'rv_t+42', 'lg_return_t+42', 'rv_t+43', 'lg_return_t+43', 'rv_t+44', 'lg_return_t+44', 'rv_t+45', 'lg_return_t+45', 'rv_t+46', 'lg_return_t+46', 'rv_t+47', 'lg_return_t+47', 'rv_t+48', 'lg_return_t+48', 'rv_t+49', 'lg_return_t+49', 'rv_t+50', 'lg_return_t+50', 'rv_t+51', 'lg_return_t+51', 'rv_t+52', 'lg_return_t+52', 'rv_t+53', 'lg_return_t+53', 'rv_t+54', 'lg_return_t+54', 'rv_t+55', 'lg_return_t+55', 'rv_t+56', 'lg_return_t+56', 'rv_t+57', 'lg_return_t+57', 'rv_t+58', 'lg_return_t+58', 'rv_t+59', 'lg_return_t+59', 'rv_t+60', 'lg_return_t+60']
(3782, 60, 2)
[X_price] shape=(3782, 60, 2) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
[y] shape=(3782, 60, 2) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
  feature       min       p01    median       p99       max      mean  \
0      f0  0.000096  0.002543  0.151064  1.340520  5.997252  0.220419   
1      f1 -2.683668 -1.248301  0.002834  1.199063  2.855067 -0.004994   

        std  
0  0.287035  
1  0.445894  
  feature       min       p01    median       p99       max      mean  \
0      f0  0.000096  0.002543  0.151394  1.342547  5.997252  0.221915   
1      f1 -2.683668 -1.246691  0.001969  1.242566  2.855067 -0.003349   

        std  
0  0.290001  
1  0.447784  
In [17]:
# Extract the v10 AAPL arrays and column layouts for later use.
aapl10_entry = structured_data_dict_10["AAPL"]

aapl10_X_price = aapl10_entry.get("X_other")
aapl10_X_time = aapl10_entry.get("X_time")
aapl10_y = aapl10_entry.get("y")
aapl10_y_columns = aapl10_entry.get("y_columns")
aapl10_X_columns = aapl10_entry.get("X_other_columns")

Load data v11¶

In [18]:
# Load the v11 structured dataset from disk.
load_data_object_11_file_path = os.path.join(
    root_folder, objects_relative_path, "structured_data_dict_11.pkl"
)
with open(load_data_object_11_file_path, "rb") as f:
    structured_data_dict_11 = pickle.load(f)
print("Data dictionary 11 loaded successfully.")

# Unpack the EURUSD arrays and their column layouts.
eur11_entry = structured_data_dict_11["EURUSD"]
eur11_X_price = eur11_entry.get("X_other")
eur11_X_time = eur11_entry.get("X_time")
eur11_y = eur11_entry.get("y")
eur11_y_columns = eur11_entry.get("y_columns")
eur11_X_columns = eur11_entry.get("X_other_columns")

print(eur11_X_columns)
print(eur11_y_columns)

# Per-feature summary stats plus NaN/Inf sanity checks.
df_xprice = summarize_features(eur11_X_price, eur11_X_columns, name="X_price")
df_y = summarize_features(eur11_y, eur11_y_columns, name="y")
print(df_xprice.head(20))
print(df_y.head(20))
Data dictionary 11 loaded successfully.
['rv_t-59', 'x_best_alpha_1_t-59', 'x_best_beta_1_t-59', 'x_best_omega_t-59', 'x_best_mu_t-59', 'x_best_nu_tgarch_t-59', 'x_best_pred_var_t-59', 'rv_t-58', 'x_best_alpha_1_t-58', 'x_best_beta_1_t-58', 'x_best_omega_t-58', 'x_best_mu_t-58', 'x_best_nu_tgarch_t-58', 'x_best_pred_var_t-58', 'rv_t-57', 'x_best_alpha_1_t-57', 'x_best_beta_1_t-57', 'x_best_omega_t-57', 'x_best_mu_t-57', 'x_best_nu_tgarch_t-57', 'x_best_pred_var_t-57', 'rv_t-56', 'x_best_alpha_1_t-56', 'x_best_beta_1_t-56', 'x_best_omega_t-56', 'x_best_mu_t-56', 'x_best_nu_tgarch_t-56', 'x_best_pred_var_t-56', 'rv_t-55', 'x_best_alpha_1_t-55', 'x_best_beta_1_t-55', 'x_best_omega_t-55', 'x_best_mu_t-55', 'x_best_nu_tgarch_t-55', 'x_best_pred_var_t-55', 'rv_t-54', 'x_best_alpha_1_t-54', 'x_best_beta_1_t-54', 'x_best_omega_t-54', 'x_best_mu_t-54', 'x_best_nu_tgarch_t-54', 'x_best_pred_var_t-54', 'rv_t-53', 'x_best_alpha_1_t-53', 'x_best_beta_1_t-53', 'x_best_omega_t-53', 'x_best_mu_t-53', 'x_best_nu_tgarch_t-53', 'x_best_pred_var_t-53', 'rv_t-52', 'x_best_alpha_1_t-52', 'x_best_beta_1_t-52', 'x_best_omega_t-52', 'x_best_mu_t-52', 'x_best_nu_tgarch_t-52', 'x_best_pred_var_t-52', 'rv_t-51', 'x_best_alpha_1_t-51', 'x_best_beta_1_t-51', 'x_best_omega_t-51', 'x_best_mu_t-51', 'x_best_nu_tgarch_t-51', 'x_best_pred_var_t-51', 'rv_t-50', 'x_best_alpha_1_t-50', 'x_best_beta_1_t-50', 'x_best_omega_t-50', 'x_best_mu_t-50', 'x_best_nu_tgarch_t-50', 'x_best_pred_var_t-50', 'rv_t-49', 'x_best_alpha_1_t-49', 'x_best_beta_1_t-49', 'x_best_omega_t-49', 'x_best_mu_t-49', 'x_best_nu_tgarch_t-49', 'x_best_pred_var_t-49', 'rv_t-48', 'x_best_alpha_1_t-48', 'x_best_beta_1_t-48', 'x_best_omega_t-48', 'x_best_mu_t-48', 'x_best_nu_tgarch_t-48', 'x_best_pred_var_t-48', 'rv_t-47', 'x_best_alpha_1_t-47', 'x_best_beta_1_t-47', 'x_best_omega_t-47', 'x_best_mu_t-47', 'x_best_nu_tgarch_t-47', 'x_best_pred_var_t-47', 'rv_t-46', 'x_best_alpha_1_t-46', 'x_best_beta_1_t-46', 'x_best_omega_t-46', 'x_best_mu_t-46', 'x_best_nu_tgarch_t-46', 
'x_best_pred_var_t-46', 'rv_t-45', 'x_best_alpha_1_t-45', 'x_best_beta_1_t-45', 'x_best_omega_t-45', 'x_best_mu_t-45', 'x_best_nu_tgarch_t-45', 'x_best_pred_var_t-45', 'rv_t-44', 'x_best_alpha_1_t-44', 'x_best_beta_1_t-44', 'x_best_omega_t-44', 'x_best_mu_t-44', 'x_best_nu_tgarch_t-44', 'x_best_pred_var_t-44', 'rv_t-43', 'x_best_alpha_1_t-43', 'x_best_beta_1_t-43', 'x_best_omega_t-43', 'x_best_mu_t-43', 'x_best_nu_tgarch_t-43', 'x_best_pred_var_t-43', 'rv_t-42', 'x_best_alpha_1_t-42', 'x_best_beta_1_t-42', 'x_best_omega_t-42', 'x_best_mu_t-42', 'x_best_nu_tgarch_t-42', 'x_best_pred_var_t-42', 'rv_t-41', 'x_best_alpha_1_t-41', 'x_best_beta_1_t-41', 'x_best_omega_t-41', 'x_best_mu_t-41', 'x_best_nu_tgarch_t-41', 'x_best_pred_var_t-41', 'rv_t-40', 'x_best_alpha_1_t-40', 'x_best_beta_1_t-40', 'x_best_omega_t-40', 'x_best_mu_t-40', 'x_best_nu_tgarch_t-40', 'x_best_pred_var_t-40', 'rv_t-39', 'x_best_alpha_1_t-39', 'x_best_beta_1_t-39', 'x_best_omega_t-39', 'x_best_mu_t-39', 'x_best_nu_tgarch_t-39', 'x_best_pred_var_t-39', 'rv_t-38', 'x_best_alpha_1_t-38', 'x_best_beta_1_t-38', 'x_best_omega_t-38', 'x_best_mu_t-38', 'x_best_nu_tgarch_t-38', 'x_best_pred_var_t-38', 'rv_t-37', 'x_best_alpha_1_t-37', 'x_best_beta_1_t-37', 'x_best_omega_t-37', 'x_best_mu_t-37', 'x_best_nu_tgarch_t-37', 'x_best_pred_var_t-37', 'rv_t-36', 'x_best_alpha_1_t-36', 'x_best_beta_1_t-36', 'x_best_omega_t-36', 'x_best_mu_t-36', 'x_best_nu_tgarch_t-36', 'x_best_pred_var_t-36', 'rv_t-35', 'x_best_alpha_1_t-35', 'x_best_beta_1_t-35', 'x_best_omega_t-35', 'x_best_mu_t-35', 'x_best_nu_tgarch_t-35', 'x_best_pred_var_t-35', 'rv_t-34', 'x_best_alpha_1_t-34', 'x_best_beta_1_t-34', 'x_best_omega_t-34', 'x_best_mu_t-34', 'x_best_nu_tgarch_t-34', 'x_best_pred_var_t-34', 'rv_t-33', 'x_best_alpha_1_t-33', 'x_best_beta_1_t-33', 'x_best_omega_t-33', 'x_best_mu_t-33', 'x_best_nu_tgarch_t-33', 'x_best_pred_var_t-33', 'rv_t-32', 'x_best_alpha_1_t-32', 'x_best_beta_1_t-32', 'x_best_omega_t-32', 'x_best_mu_t-32', 
'x_best_nu_tgarch_t-32', 'x_best_pred_var_t-32', 'rv_t-31', 'x_best_alpha_1_t-31', 'x_best_beta_1_t-31', 'x_best_omega_t-31', 'x_best_mu_t-31', 'x_best_nu_tgarch_t-31', 'x_best_pred_var_t-31', 'rv_t-30', 'x_best_alpha_1_t-30', 'x_best_beta_1_t-30', 'x_best_omega_t-30', 'x_best_mu_t-30', 'x_best_nu_tgarch_t-30', 'x_best_pred_var_t-30', 'rv_t-29', 'x_best_alpha_1_t-29', 'x_best_beta_1_t-29', 'x_best_omega_t-29', 'x_best_mu_t-29', 'x_best_nu_tgarch_t-29', 'x_best_pred_var_t-29', 'rv_t-28', 'x_best_alpha_1_t-28', 'x_best_beta_1_t-28', 'x_best_omega_t-28', 'x_best_mu_t-28', 'x_best_nu_tgarch_t-28', 'x_best_pred_var_t-28', 'rv_t-27', 'x_best_alpha_1_t-27', 'x_best_beta_1_t-27', 'x_best_omega_t-27', 'x_best_mu_t-27', 'x_best_nu_tgarch_t-27', 'x_best_pred_var_t-27', 'rv_t-26', 'x_best_alpha_1_t-26', 'x_best_beta_1_t-26', 'x_best_omega_t-26', 'x_best_mu_t-26', 'x_best_nu_tgarch_t-26', 'x_best_pred_var_t-26', 'rv_t-25', 'x_best_alpha_1_t-25', 'x_best_beta_1_t-25', 'x_best_omega_t-25', 'x_best_mu_t-25', 'x_best_nu_tgarch_t-25', 'x_best_pred_var_t-25', 'rv_t-24', 'x_best_alpha_1_t-24', 'x_best_beta_1_t-24', 'x_best_omega_t-24', 'x_best_mu_t-24', 'x_best_nu_tgarch_t-24', 'x_best_pred_var_t-24', 'rv_t-23', 'x_best_alpha_1_t-23', 'x_best_beta_1_t-23', 'x_best_omega_t-23', 'x_best_mu_t-23', 'x_best_nu_tgarch_t-23', 'x_best_pred_var_t-23', 'rv_t-22', 'x_best_alpha_1_t-22', 'x_best_beta_1_t-22', 'x_best_omega_t-22', 'x_best_mu_t-22', 'x_best_nu_tgarch_t-22', 'x_best_pred_var_t-22', 'rv_t-21', 'x_best_alpha_1_t-21', 'x_best_beta_1_t-21', 'x_best_omega_t-21', 'x_best_mu_t-21', 'x_best_nu_tgarch_t-21', 'x_best_pred_var_t-21', 'rv_t-20', 'x_best_alpha_1_t-20', 'x_best_beta_1_t-20', 'x_best_omega_t-20', 'x_best_mu_t-20', 'x_best_nu_tgarch_t-20', 'x_best_pred_var_t-20', 'rv_t-19', 'x_best_alpha_1_t-19', 'x_best_beta_1_t-19', 'x_best_omega_t-19', 'x_best_mu_t-19', 'x_best_nu_tgarch_t-19', 'x_best_pred_var_t-19', 'rv_t-18', 'x_best_alpha_1_t-18', 'x_best_beta_1_t-18', 'x_best_omega_t-18', 
'x_best_mu_t-18', 'x_best_nu_tgarch_t-18', 'x_best_pred_var_t-18', 'rv_t-17', 'x_best_alpha_1_t-17', 'x_best_beta_1_t-17', 'x_best_omega_t-17', 'x_best_mu_t-17', 'x_best_nu_tgarch_t-17', 'x_best_pred_var_t-17', 'rv_t-16', 'x_best_alpha_1_t-16', 'x_best_beta_1_t-16', 'x_best_omega_t-16', 'x_best_mu_t-16', 'x_best_nu_tgarch_t-16', 'x_best_pred_var_t-16', 'rv_t-15', 'x_best_alpha_1_t-15', 'x_best_beta_1_t-15', 'x_best_omega_t-15', 'x_best_mu_t-15', 'x_best_nu_tgarch_t-15', 'x_best_pred_var_t-15', 'rv_t-14', 'x_best_alpha_1_t-14', 'x_best_beta_1_t-14', 'x_best_omega_t-14', 'x_best_mu_t-14', 'x_best_nu_tgarch_t-14', 'x_best_pred_var_t-14', 'rv_t-13', 'x_best_alpha_1_t-13', 'x_best_beta_1_t-13', 'x_best_omega_t-13', 'x_best_mu_t-13', 'x_best_nu_tgarch_t-13', 'x_best_pred_var_t-13', 'rv_t-12', 'x_best_alpha_1_t-12', 'x_best_beta_1_t-12', 'x_best_omega_t-12', 'x_best_mu_t-12', 'x_best_nu_tgarch_t-12', 'x_best_pred_var_t-12', 'rv_t-11', 'x_best_alpha_1_t-11', 'x_best_beta_1_t-11', 'x_best_omega_t-11', 'x_best_mu_t-11', 'x_best_nu_tgarch_t-11', 'x_best_pred_var_t-11', 'rv_t-10', 'x_best_alpha_1_t-10', 'x_best_beta_1_t-10', 'x_best_omega_t-10', 'x_best_mu_t-10', 'x_best_nu_tgarch_t-10', 'x_best_pred_var_t-10', 'rv_t-9', 'x_best_alpha_1_t-9', 'x_best_beta_1_t-9', 'x_best_omega_t-9', 'x_best_mu_t-9', 'x_best_nu_tgarch_t-9', 'x_best_pred_var_t-9', 'rv_t-8', 'x_best_alpha_1_t-8', 'x_best_beta_1_t-8', 'x_best_omega_t-8', 'x_best_mu_t-8', 'x_best_nu_tgarch_t-8', 'x_best_pred_var_t-8', 'rv_t-7', 'x_best_alpha_1_t-7', 'x_best_beta_1_t-7', 'x_best_omega_t-7', 'x_best_mu_t-7', 'x_best_nu_tgarch_t-7', 'x_best_pred_var_t-7', 'rv_t-6', 'x_best_alpha_1_t-6', 'x_best_beta_1_t-6', 'x_best_omega_t-6', 'x_best_mu_t-6', 'x_best_nu_tgarch_t-6', 'x_best_pred_var_t-6', 'rv_t-5', 'x_best_alpha_1_t-5', 'x_best_beta_1_t-5', 'x_best_omega_t-5', 'x_best_mu_t-5', 'x_best_nu_tgarch_t-5', 'x_best_pred_var_t-5', 'rv_t-4', 'x_best_alpha_1_t-4', 'x_best_beta_1_t-4', 'x_best_omega_t-4', 'x_best_mu_t-4', 
'x_best_nu_tgarch_t-4', 'x_best_pred_var_t-4', 'rv_t-3', 'x_best_alpha_1_t-3', 'x_best_beta_1_t-3', 'x_best_omega_t-3', 'x_best_mu_t-3', 'x_best_nu_tgarch_t-3', 'x_best_pred_var_t-3', 'rv_t-2', 'x_best_alpha_1_t-2', 'x_best_beta_1_t-2', 'x_best_omega_t-2', 'x_best_mu_t-2', 'x_best_nu_tgarch_t-2', 'x_best_pred_var_t-2', 'rv_t-1', 'x_best_alpha_1_t-1', 'x_best_beta_1_t-1', 'x_best_omega_t-1', 'x_best_mu_t-1', 'x_best_nu_tgarch_t-1', 'x_best_pred_var_t-1', 'rv_t-0', 'x_best_alpha_1_t-0', 'x_best_beta_1_t-0', 'x_best_omega_t-0', 'x_best_mu_t-0', 'x_best_nu_tgarch_t-0', 'x_best_pred_var_t-0']
['rv_t+1', 'rv_t+2', 'rv_t+3', 'rv_t+4', 'rv_t+5', 'rv_t+6', 'rv_t+7', 'rv_t+8', 'rv_t+9', 'rv_t+10', 'rv_t+11', 'rv_t+12', 'rv_t+13', 'rv_t+14', 'rv_t+15', 'rv_t+16', 'rv_t+17', 'rv_t+18', 'rv_t+19', 'rv_t+20', 'rv_t+21', 'rv_t+22', 'rv_t+23', 'rv_t+24', 'rv_t+25', 'rv_t+26', 'rv_t+27', 'rv_t+28', 'rv_t+29', 'rv_t+30', 'rv_t+31', 'rv_t+32', 'rv_t+33', 'rv_t+34', 'rv_t+35', 'rv_t+36', 'rv_t+37', 'rv_t+38', 'rv_t+39', 'rv_t+40', 'rv_t+41', 'rv_t+42', 'rv_t+43', 'rv_t+44', 'rv_t+45', 'rv_t+46', 'rv_t+47', 'rv_t+48', 'rv_t+49', 'rv_t+50', 'rv_t+51', 'rv_t+52', 'rv_t+53', 'rv_t+54', 'rv_t+55', 'rv_t+56', 'rv_t+57', 'rv_t+58', 'rv_t+59', 'rv_t+60']
[X_price] shape=(3782, 60, 7) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
[y] shape=(3782, 60, 1) | NaN: 0 | +Inf: 0 | -Inf: 0 | Any Inf: 0
  feature       min           p01    median         p99         max      mean  \
0      f5  0.000000  3.031849e+00  5.034044  106.962327  345.033304  7.723787   
1      f6 -3.193956 -3.013444e+00 -1.744387   -0.294217   23.025851 -1.694000   
2      f0  0.000096  2.542676e-03  0.151064    1.340520    5.997252  0.220419   
3      f2  0.000000  0.000000e+00  0.954145    1.000000    1.000000  0.817385   
4      f3  0.000000  9.629686e-10  0.003832    0.219574    0.424595  0.028887   
5      f1  0.000000  0.000000e+00  0.006112    0.136720    0.205071  0.024899   
6      f4 -0.071164 -6.033247e-02 -0.002668    0.037148    0.043256 -0.003317   

         std  
0  18.160206  
1   0.709077  
2   0.287035  
3   0.315141  
4   0.052974  
5   0.034081  
6   0.020569  
  feature       min       p01    median       p99       max      mean  \
0      f0  0.000096  0.002543  0.151394  1.342547  5.997252  0.221915   

        std  
0  0.290001  

GARCH baseline¶

GARCH wrapper¶

In [26]:
import warnings
from typing import Dict, List, Optional, Sequence, Tuple
import numpy as np
from arch import arch_model

def _norm_dist_name(d):
    d = str(d).lower()
    aliases = {"gaussian": "normal", "norm": "normal", "student": "t", "studentt": "t"}
    return aliases.get(d, d)

def _parse_cfg(cfg: Sequence):
    if len(cfg) == 4:
        mt, p, q, dist = cfg
        return str(mt).upper(), int(p), 0, int(q), _norm_dist_name(dist)
    elif len(cfg) == 5:
        mt, p, o, q, dist = cfg
        return str(mt).upper(), int(p), int(o), int(q), _norm_dist_name(dist)
    raise ValueError(f"cfg must be len 4 or 5; got {cfg}")

class VolBICWrapper:
    """BIC-based model selector over ARCH-family volatility models.

    `fit()` tries each candidate configuration on the supplied series and
    keeps the fitted result with the lowest BIC.  The `forecast_*` methods
    then produce variance forecasts, either from the fitting sample or on
    new data with the selected parameters held fixed (via `res.fix`).
    Built on the `arch` package (arch_model / fit / fix / forecast).
    """

    def __init__(self, mean_mode: str = "Zero", verbose: bool = False,
                 candidates: Optional[List[Sequence]] = None):

        # Candidate configs, parsed by _parse_cfg:
        # (type, p, q, dist) or (type, p, o, q, dist).
        self.candidates = candidates or [
            ("GARCH",    1, 1, "t"),
            ("GARCH",    2, 2, "t"),
            ("GARCH",    1, 1, "normal"),
            ("GJRGARCH", 1, 1, "t"),
            ("GJRGARCH", 1, 1, "normal"),
            ("EGARCH",   1, 1, "t"),
            ("EGARCH",   1, 1, 1, "t"),
            ("EGARCH",   1, 1, "normal"),
        ]
        self.mean_mode = mean_mode
        self.verbose = verbose

        # Selection state, populated by fit().
        self.best_config: Optional[Tuple] = None
        self.best_result = None
        self.best_params: Optional[Dict] = None
        self.best_bic: float = np.inf

    def _build(self, x: np.ndarray, cfg: Sequence):
        """Construct an (unfitted) arch_model for config `cfg` on series `x`."""
        mt, p, o, q, dist = _parse_cfg(cfg)
        mean_str = str(self.mean_mode).lower()  # arch expects lowercase mean names

        if mt == "GARCH":
            return arch_model(x, mean=mean_str, vol="GARCH", p=p, q=q, dist=dist)
        if mt in ("GJRGARCH", "GJR-GARCH", "GJR"):
            # GJR is plain GARCH with asymmetry order o >= 1.
            o_ = 1 if o == 0 else o
            return arch_model(x, mean=mean_str, vol="GARCH", p=p, o=o_, q=q, dist=dist)
        if mt == "ARCH":
            return arch_model(x, mean=mean_str, vol="ARCH", p=p, dist=dist)
        if mt == "EGARCH":
            o_ = 1 if o == 0 else o
            return arch_model(x, mean=mean_str, vol="EGARCH", p=p, o=o_, q=q, dist=dist)
        raise ValueError(f"Unknown model type: {mt}")

    def _check_fitted(self):
        """Raise RuntimeError if fit() has not completed successfully."""
        if self.best_result is None or self.best_config is None or self.best_params is None:
            raise RuntimeError("Model is not fitted. Call fit(x_fit) first.")

    def fit(self, x_fit: np.ndarray):
        """Fit every candidate on `x_fit` and keep the lowest-BIC result.

        Returns self; raises if the series is too short or all fits fail.
        """
        x = np.asarray(x_fit, dtype=float).ravel()
        if x.size < 20:
            raise ValueError("x_fit is too short; need at least ~20 obs.")

        best_res, best_cfg, best_bic = None, None, np.inf

        for cfg in self.candidates:
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")  # arch emits convergence warnings
                    m = self._build(x, cfg)
                    res = m.fit(disp="off")
                    bic = float(res.bic)
                if np.isfinite(bic) and bic < best_bic:
                    best_res, best_cfg, best_bic = res, cfg, bic
            except Exception as e:
                # A failing candidate is skipped, not fatal.
                if self.verbose:
                    print(f"[fit] {cfg} failed: {e}")

        if best_res is None:
            raise RuntimeError("All candidate models failed to fit.")

        self.best_result = best_res
        self.best_config = best_cfg
        self.best_bic = best_bic
        self.best_params = best_res.params.to_dict()

        if self.verbose:
            print(f"[fit] Best by BIC: {self.best_config}, BIC={self.best_bic:.3f}")
        return self

    def _forecast_variance(self, res, H: int, sim_paths: int = 0, random_state: Optional[int] = None):
        """H-step variance forecast from `res`: analytic first, then simulation.

        Returns a (H,) float array taken from the last row of fc.variance.
        """
        try:
            fc = res.forecast(horizon=int(H), reindex=False)
            return np.asarray(fc.variance.values[-1, :H], dtype=float)
        except Exception as e:
            # Analytic multi-step forecasts are unavailable for some specs
            # (e.g. EGARCH in arch); fall through to simulation below.
            if self.verbose:
                print(f"[forecast] analytic failed ({e}); trying simulation…")

        paths = int(sim_paths) if sim_paths and sim_paths > 0 else 1000
        last_err = None
        # Different arch versions spell the simulation kwargs differently;
        # try the variants in order until one is accepted.
        for kwargs in (
            dict(method="simulation", simulations=paths, reindex=False, random_state=random_state),
            dict(method="simulation", simulations=paths, reindex=False),
            dict(method="simulation", reps=paths, reindex=False), 
        ):
            try:
                fc = res.forecast(horizon=int(H), **kwargs)
                return np.asarray(fc.variance.values[-1, :H], dtype=float)
            except Exception as e:
                last_err = e
        raise RuntimeError(f"simulation forecast failed: {last_err}")

    def forecast_from_fit(self, steps: int, *, sim_paths: int = 0, random_state: Optional[int] = None) -> np.ndarray:
        """Forecast `steps` of variance from the end of the fitting sample."""
        self._check_fitted()
        return self._forecast_variance(self.best_result, steps, sim_paths=sim_paths, random_state=random_state)

    def forecast_on_new(self, x_new: np.ndarray, steps: int, *, sim_paths: int = 0, random_state: Optional[int] = None) -> np.ndarray:
        """Forecast on new data with the selected parameters held fixed."""
        self._check_fitted()
        x_new = np.asarray(x_new, dtype=float).ravel()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            m_new = self._build(x_new, self.best_config)
            res_fixed = m_new.fix(self.best_result.params)  # no re-estimation
        return self._forecast_variance(res_fixed, steps, sim_paths=sim_paths, random_state=random_state)

    def forecast_panel_on_new(self, x_new: np.ndarray, steps: int, start_anchor: int,
                              *, sim_paths: int = 0, random_state: Optional[int] = None) -> np.ndarray:
        """Variance forecasts for every anchor t in [start_anchor, len(x_new)).

        Parameters stay fixed at the fitted values throughout.
        Returns an (n_anchors, steps) array.
        """
        self._check_fitted()
        x_new = np.asarray(x_new, dtype=float).ravel()
        H = int(steps); T = x_new.shape[0]
        if start_anchor < 0 or start_anchor >= T:
            raise ValueError(f"start_anchor must be in [0, {T-1}]")

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            m_new = self._build(x_new, self.best_config)
            res_fixed = m_new.fix(self.best_result.params)

            # Fast path: one analytic forecast covering all anchors at once.
            try:
                fc = res_fixed.forecast(horizon=H, start=start_anchor, reindex=False)
                arr = np.asarray(fc.variance.values, dtype=float)
                if arr.shape[0] > (T - start_anchor):
                    arr = arr[-(T - start_anchor):, :]
                return arr[:, :H]
            except Exception:
                pass

            # Simulation fallback, tolerating kwarg-name differences across arch versions.
            for kwargs in (
                dict(method="simulation", simulations=(sim_paths or 1000), start=start_anchor, reindex=False, random_state=random_state),
                dict(method="simulation", simulations=(sim_paths or 1000), start=start_anchor, reindex=False),
                dict(method="simulation", reps=(sim_paths or 1000), start=start_anchor, reindex=False),
            ):
                try:
                    fc = res_fixed.forecast(horizon=H, **kwargs)
                    arr = np.asarray(fc.variance.values, dtype=float)
                    if arr.shape[0] > (T - start_anchor):
                        arr = arr[-(T - start_anchor):, :]
                    return arr[:, :H]
                except Exception:
                    continue

        # Last resort: rebuild on each expanding prefix and forecast one
        # anchor at a time (slow but robust).
        n_anchors = T - start_anchor
        out = np.empty((n_anchors, H), dtype=float)
        for k, t in enumerate(range(start_anchor, T)):
            sub = x_new[:t + 1]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                m_t = self._build(sub, self.best_config)
                res_t = m_t.fix(self.best_result.params)
            out[k, :] = self._forecast_variance(res_t, H, sim_paths=sim_paths, random_state=random_state)
        return out

    def get_best_config(self):
        """Selected candidate tuple (None before fit)."""
        return self.best_config

    def get_best_params(self) -> Dict:
        """Copy of the selected model's parameter dict ({} before fit)."""
        return dict(self.best_params) if self.best_params is not None else {}

    def state_dict(self) -> Dict:
        """Plain-Python summary of the selection result (for saving/logging)."""
        return {
            "mean_mode": str(self.mean_mode).lower(),
            "best_config": self.best_config,
            "best_params": self.get_best_params(),
            "best_bic": float(self.best_bic),
        }

GARCH evaluator¶

In [27]:
import numpy as np
import pandas as pd

def evaluate_vol_on_anchors_rolling_split(
    X_anchors,                # (B,) series the rolling GARCH models are fit on
    Y_fwd,                   # (B, T_fwd) forward realized targets per anchor
    horizon,                 # forecast horizon H (columns of Y_fwd used)
    *,
    split_ratio=0.8,           # train fraction; OOS anchors start after it
    roll_window=250,         # length of the rolling fitting window
    mean_mode="Zero",          # mean spec handed to VolBICWrapper
    candidates=None,          # candidate configs for VolBICWrapper
    sim_paths=2000,            # simulation paths for the forecast fallback
    horizons=(1, 3, 5, 10, 20, -1),  # sub-horizons to report; -1 = full H
    epsilon=1e-12,
    verbose=True,
    pretty_print=True,
    qlike_mode="ratio",       # QLIKE variant: 'ratio' or 'log'
    qlike_floor="auto",
    qlike_calibrate=True,
):
    """Evaluate a rolling-refit GARCH baseline on ML-aligned OOS anchors.

    For each anchor t >= t0 the trailing `roll_window` observations are
    standardized, a VolBICWrapper is fit on them, and the H-step variance
    forecast is rescaled by the window variance (sd**2).  Forecasts are
    scored against Y_fwd with MAE / RMSE / R2 / Pearson r / QLIKE per
    sub-horizon.

    Returns (results, None, None, panel_df, y_pair), where y_pair has shape
    (n_oos, horizon, 2) stacking [y_true, clipped y_pred].  The two Nones
    keep the return arity aligned with the ML evaluation helpers.
    """
    from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

    # ---- input validation --------------------------------------------------
    x = np.asarray(X_anchors, dtype=float).ravel()
    Y = np.asarray(Y_fwd, dtype=float)
    if x.ndim != 1:
        raise ValueError("X_anchors must be 1D (B,).")
    if Y.ndim != 2:
        raise ValueError("Y_fwd must be 2D (B, T_fwd).")
    B = x.shape[0]
    if Y.shape[0] != B:
        raise ValueError(f"len(X)={B} != len(Y)={Y.shape[0]}.")
    if horizon < 1:
        raise ValueError("horizon must be >= 1.")
    if Y.shape[1] < horizon:
        raise ValueError("Y_fwd too short for horizon.")
    if roll_window < 20:
        raise ValueError("roll_window too small.")
    if roll_window > B:
        raise ValueError("roll_window > sample size.")

    # First OOS anchor: after the train split AND once a full window exists.
    n_train = max(20, int(np.floor(split_ratio * B)))
    if n_train >= B:
        raise ValueError(f"split_ratio too large: n_train={n_train} >= B={B}.")
    t0 = max(n_train - 1, roll_window - 1)  
    n_oos = B - t0

    if verbose:
        print(f"[vol rolling] B={B}, split={split_ratio} -> n_train={n_train}, "
              f"t0={t0}, anchors={n_oos}, H={horizon}, roll_window={roll_window}")

    y_pred = np.full((n_oos, horizon), np.nan, dtype=float)  # NaN = failed anchor
    y_true = Y[t0:, :horizon].astype(float)

    def _auto_floor(arr):
        # Data-driven positive floor for QLIKE: a tiny fraction of the 5th
        # percentile of the positive values, never below `epsilon`.
        arr = np.asarray(arr, dtype=np.float64)
        pos = arr[np.isfinite(arr) & (arr > 0)]
        if pos.size == 0:
            return float(epsilon)
        base = np.percentile(pos, 5.0)
        return float(max(epsilon, 1e-6 * base))

    def _metric_block(yt_raw, yp_raw):
        # Compute (MAE, RMSE, R2, Pearson r, QLIKE) over the finite pairs;
        # all-NaN input yields all-NaN metrics.
        mask = np.isfinite(yt_raw) & np.isfinite(yp_raw)
        if not np.any(mask):
            return np.nan, np.nan, np.nan, np.nan, np.nan
        yt = yt_raw[mask].astype(np.float64)
        yp = yp_raw[mask].astype(np.float64)

        mae  = mean_absolute_error(yt, yp)
        rmse = mean_squared_error(yt, yp) ** 0.5
        r2   = r2_score(yt, yp)

        # Pearson r computed manually; NaN when either side is constant.
        ytm, ypm = yt - yt.mean(), yp - yp.mean()
        denom = np.sqrt((ytm**2).mean() * (ypm**2).mean())
        pr = float((ytm*ypm).mean() / denom) if denom > epsilon else np.nan

        # QLIKE needs strictly positive arguments; clip both sides to a floor.
        floor = _auto_floor(yt) if qlike_floor == "auto" else max(float(qlike_floor), float(epsilon))
        yt_pos = np.clip(yt, floor, None)
        yp_pos = np.clip(yp, floor, None)
        if qlike_calibrate:
            # Rescale predictions by the mean true/pred ratio so QLIKE scores
            # shape rather than overall level.
            c = float(np.mean(yt_pos / yp_pos))
            yp_pos = np.clip(yp_pos * c, floor, None)

        if qlike_mode == "ratio":
            r = yt_pos / yp_pos
            qlike = float(np.mean(r - np.log(r) - 1.0))
        elif qlike_mode == "log":
            qlike = float(np.mean(np.log(yp_pos) + (yt_pos / yp_pos)))
        else:
            raise ValueError("qlike_mode must be 'ratio' or 'log'")

        return float(mae), float(rmse), float(r2), pr, qlike

    # ---- rolling refit + forecast over OOS anchors -------------------------
    for k, t in enumerate(range(t0, B)):
        left = t - roll_window + 1

        win = x[left : t + 1]

        # Standardize the window so candidate fits are numerically stable;
        # the sd**2 factor below undoes this on the variance forecast.
        mu = float(np.mean(win))
        sd = float(np.std(win))
        if not np.isfinite(sd) or sd <= 0:
            sd = 1.0
        win_std = (win - mu) / sd

        try:
            vw = VolBICWrapper(mean_mode=mean_mode, verbose=False, candidates=candidates).fit(win_std)
            v_std = vw.forecast_from_fit(horizon, sim_paths=sim_paths)
            y_pred[k, :] = (sd ** 2) * np.asarray(v_std, dtype=float)
        except Exception as e:
            # Leave NaNs for this anchor; the metric mask drops them.
            if verbose:
                print(f"  anchor {t}: fit/forecast failed ({e}); skipping")

        # Progress print roughly 5 times over the OOS span.
        if verbose and (k % max(1, n_oos // 5) == 0):
            show = y_pred[k, 0] if np.isfinite(y_pred[k, 0]) else np.nan
            print(f"  anchor {t}/{B-1}: sd={sd:.6g}, h1_pred={show:.6g}")

    # ---- metrics per requested sub-horizon ---------------------------------
    results = {}
    max_h = horizon
    for h in horizons:
        label = f"{h} day(s)" if h > 0 else "full horizon"
        h_slice = h if h > 0 else max_h
        if h_slice > max_h:
            continue
        yt_slice = y_true[:, :h_slice].ravel()
        yp_slice = y_pred[:, :h_slice].ravel()
        mae, rmse, r2, pr, qlike = _metric_block(yt_slice, yp_slice)
        results[f"{label} MAE"]        = mae
        results[f"{label} RMSE"]       = rmse
        results[f"{label} R2"]         = r2
        results[f"{label} Pearson r"]  = pr
        results[f"{label} QLIKE"]      = qlike

    # Long-format panel: one row per (anchor, horizon) pair.
    anchors = np.arange(t0, B, dtype=int)
    Hcol = np.arange(1, horizon + 1, dtype=int)
    A, Hh = np.meshgrid(anchors, Hcol, indexing="ij")
    panel_df = pd.DataFrame({
        "anchor": A.ravel(),
        "horizon": Hh.ravel(),
        "y_true": y_true.ravel(),
        "y_pred": np.clip(y_pred, epsilon, None).ravel(),
    })

    if verbose and pretty_print:
        metric_order = ["MAE", "RMSE", "R2", "Pearson r", "QLIKE"]
        labels = [f"{h} day(s)" if h > 0 else "full horizon"
                  for h in horizons if (h > 0 and h <= max_h) or h == -1]
        print("\n[metrics — vol rolling (ML-aligned split)]")
        for lbl in labels:
            print(f"\n{lbl}")
            for m in metric_order:
                key = f"{lbl} {m}"
                val = results.get(key, np.nan)
                print(f"{m:<10}: {val:.6f}" if np.isfinite(val) else f"{m:<10}: {val}")

    y_pair = np.stack([y_true, np.clip(y_pred, epsilon, None)], axis=-1)
    return results, None, None, panel_df, y_pair

EURUSD GARCH Test¶

GARCH data¶

In [29]:
# Build the GARCH baseline inputs from the windowed EURUSD tensors:
# a scalar anchor series (one feature at one window step) and the
# (B, 60) forward RV targets.  eur10_* come from earlier data cells.
print(eur10_X_price.shape)  
anchor_feature_idx = 1          
X_garch = eur10_X_price[:, 1, anchor_feature_idx]

print(X_garch.shape)

print(eur10_y.shape)  
y_garch = eur10_y[:, :, 1]               
print(y_garch.shape)
(3782, 60, 2)
(3782,)
(3782, 60, 2)
(3782, 60)

GARCH evaluation¶

In [ ]:
# Rolling GARCH(1,1)-t baseline on the EURUSD anchors, 20-step horizon,
# with the candidate set pinned to a single configuration.
H=20
res, _, _, panel, y_pair = evaluate_vol_on_anchors_rolling_split(
    X_anchors=X_garch,
    Y_fwd=y_garch,
    horizon=H,
    split_ratio=0.8,
    roll_window=250,
    mean_mode="zero",
    candidates=[("GARCH", 1, 1, "t")],  
    sim_paths=2000,                        
    verbose=True
)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=20, roll_window=250
  anchor 3024/3781: sd=0.504062, h1_pred=0.343572
  anchor 3175/3781: sd=0.589105, h1_pred=0.384396
  anchor 3326/3781: sd=0.432523, h1_pred=0.155239
  anchor 3477/3781: sd=0.369827, h1_pred=0.144132
  anchor 3628/3781: sd=0.336784, h1_pred=0.108408
  anchor 3779/3781: sd=0.372649, h1_pred=0.214029

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.357293
RMSE      : 0.471094
R2        : -0.261556
Pearson r : 0.027083
QLIKE     : 7.959451

3 day(s)
MAE       : 0.356553
RMSE      : 0.470698
R2        : -0.262371
Pearson r : 0.032715
QLIKE     : 7.951059

5 day(s)
MAE       : 0.356405
RMSE      : 0.470518
R2        : -0.267211
Pearson r : 0.033994
QLIKE     : 7.955660

10 day(s)
MAE       : 0.357679
RMSE      : 0.472385
R2        : -0.279290
Pearson r : 0.027797
QLIKE     : 7.978474

20 day(s)
MAE       : 0.360014
RMSE      : 0.476171
R2        : -0.295258
Pearson r : 0.019077
QLIKE     : 7.995353

full horizon
MAE       : 0.360014
RMSE      : 0.476171
R2        : -0.295258
Pearson r : 0.019077
QLIKE     : 7.995353

GARCH AAPL10 data¶

In [40]:
# Same baseline inputs for AAPL: anchor feature taken at window step 55.
print(aapl10_X_price.shape)  
X_garch = aapl10_X_price[:, 55, 1]   
print(X_garch.shape)

print(aapl10_y.shape)  
y_garch = aapl10_y[:, :, 1]               
print(y_garch.shape)
(2018, 60, 2)
(2018,)
(2018, 60, 2)
(2018, 60)

GARCH AAPL10 test¶

In [41]:
# Rolling GARCH(1,1)-t baseline on the AAPL anchors; shorter roll window
# (200) because this sample is smaller than the EURUSD one.
H=20
res, _, _, panel, y_pair = evaluate_vol_on_anchors_rolling_split(
    X_anchors=X_garch,
    Y_fwd=y_garch,
    horizon=H,
    split_ratio=0.8,
    roll_window=200,
    mean_mode="zero",
    candidates=[("GARCH", 1, 1, "t")],  
    sim_paths=2000,                         
    verbose=True
)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=20, roll_window=200
  anchor 1613/2017: sd=1.59174, h1_pred=1.16246
  anchor 1694/2017: sd=1.19384, h1_pred=1.37501
  anchor 1775/2017: sd=1.2563, h1_pred=2.00446
  anchor 1856/2017: sd=1.46078, h1_pred=2.26103
  anchor 1937/2017: sd=1.46966, h1_pred=1.93182

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.194304
RMSE      : 2.857805
R2        : -1.652279
Pearson r : 0.006932
QLIKE     : 7.205541

3 day(s)
MAE       : 2.163011
RMSE      : 2.763339
R2        : -1.474063
Pearson r : 0.030918
QLIKE     : 7.200926

5 day(s)
MAE       : 2.157527
RMSE      : 2.761844
R2        : -1.462521
Pearson r : -0.000670
QLIKE     : 7.219132

10 day(s)
MAE       : 2.142096
RMSE      : 2.736135
R2        : -1.409446
Pearson r : -0.004432
QLIKE     : 7.215343

20 day(s)
MAE       : 2.129420
RMSE      : 2.719979
R2        : -1.367259
Pearson r : -0.018326
QLIKE     : 7.182576

full horizon
MAE       : 2.129420
RMSE      : 2.719979
R2        : -1.367259
Pearson r : -0.018326
QLIKE     : 7.182576

GARCH Predicted data test¶

In [ ]:
# Inspect the (anchors, horizon, 2) array of [y_true, y_pred] pairs.
y_pair
Out[ ]:
array([[[ 0.11717087,  1.16245878],
        [ 0.84276862,  1.16074113],
        [ 0.12659565,  1.15905942],
        ...,
        [-2.36765456,  1.13767022],
        [-0.89372364,  1.13647119],
        [ 0.15243905,  1.13529724]],

       [[ 0.84276862,  1.17721259],
        [ 0.12659565,  1.17586587],
        [-3.64496484,  1.17454993],
        ...,
        [-0.89372364,  1.15806247],
        [ 0.15243905,  1.15715348],
        [ 0.30418274,  1.15626527]],

       [[ 0.12659565,  1.17326081],
        [-3.64496484,  1.17214911],
        [-2.96856486,  1.1710646 ],
        ...,
        [ 0.15243905,  1.15764422],
        [ 0.30418274,  1.15691451],
        [ 1.47266112,  1.15620266]],

       ...,

       [[14.26174109,  2.42282546],
        [-4.33186128,  2.41168568],
        [ 3.97921608,  2.40112041],
        ...,
        [-3.19639323,  2.29463115],
        [-0.19124314,  2.29010283],
        [-1.145012  ,  2.28580805]],

       [[-4.33186128,  6.6472081 ],
        [ 3.97921608,  6.5599066 ],
        [ 2.18143275,  6.47474973],
        ...,
        [-0.19124314,  5.42181881],
        [-1.145012  ,  5.36461985],
        [ 0.62985935,  5.30882602]],

       [[ 3.97921608, 12.92110991],
        [ 2.18143275, 12.6706829 ],
        [-0.18781205, 12.42726777],
        ...,
        [-1.145012  ,  9.49614457],
        [ 0.62985935,  9.34161567],
        [ 0.52522721,  9.19141355]]], shape=(405, 20, 2))

NN model architectures¶

MLP models¶

Vanilla MLP¶

Vanilla MLP architecture¶

In [119]:
import torch
import torch.nn as nn

class SimpleMLP(nn.Module):
    """Plain feed-forward regressor.

    Stacks `hidden_layers + 1` blocks of Linear -> LayerNorm -> GELU ->
    Dropout, followed by a final Linear projection to `output_dim`.
    `no_tasks` is accepted for interface compatibility but unused here.
    """

    def __init__(self, input_dim, output_dim, hidden_layers=6, hidden_dim=64, dropout=0.0, no_tasks=1):
        super().__init__()

        def _block(in_dim):
            # One hidden stage: projection, normalization, activation, dropout.
            return [nn.Linear(in_dim, hidden_dim), nn.LayerNorm(hidden_dim),
                    nn.GELU(), nn.Dropout(dropout)]

        modules = _block(input_dim)
        for _ in range(hidden_layers):
            modules += _block(hidden_dim)
        modules.append(nn.Linear(hidden_dim, output_dim))
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        """Apply the full stack to `x` and return the raw outputs."""
        return self.model(x)

Vanilla MLP wrapper¶

In [120]:
from sklearn.base import BaseEstimator, RegressorMixin
import math
import torch
import torch.nn as nn
from torch.optim import Adam

class SimpleMLPWrapper(BaseEstimator, RegressorMixin):
    """sklearn-style wrapper around SimpleMLP with early stopping.

    Supports plain MSE training or variance-NLL losses in which the network
    output is interpreted as log-variance ("gauss_nll_var",
    "student_t_nll_var").  `target_is_logvar` says whether y arrives in
    log-variance space and must be exponentiated inside the loss.
    """

    def __init__(
        self,
        input_dim,
        output_dim,
        *,
        dropout=0.0,
        lr=3e-4,
        epochs=50,
        batch_size=64,
        device=None,
        verbose=True,
        hidden_layers=3,
        no_tasks=1,
        l2_weight=1e-6,
        patience=10,
        min_epochs=30,
        min_delta=1e-4,
        hidden_dim=256,

        loss_type="gauss_nll_var",   # "mse" | "gauss_nll_var" | "student_t_nll_var"
        target_is_logvar=False,      # y given in log-variance space?
        nll_eps=1e-12,               # positivity floor on the target variance
        clamp_logvar_min=-20.0,      # clamp range for the predicted log-variance
        clamp_logvar_max=20.0,
        student_df=5.0,              # degrees of freedom for the Student-t NLL
    ):
        self.input_dim = int(input_dim)
        self.output_dim = int(output_dim)
        self.dropout = float(dropout)
        self.lr = float(lr)
        self.epochs = int(epochs)
        self.batch_size = int(batch_size)
        # Device preference: Apple MPS, then CUDA, then CPU.
        self.device = device or (
            "mps" if torch.backends.mps.is_available()
            else "cuda" if torch.cuda.is_available()
            else "cpu"
        )
        self.verbose = bool(verbose)
        self.hidden_layers = int(hidden_layers)
        self.no_tasks = int(no_tasks)
        self.l2_weight = float(l2_weight)
        self.patience = int(patience)
        self.min_epochs = int(min_epochs)
        self.min_delta = float(min_delta)
        self.hidden_dim = int(hidden_dim)

        # Loss configuration.
        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)

        self._build()
        if self.verbose:
            print(f"Using device: {self.device}")

    def _build(self):
        """(Re)create the network and optimizer from current hyperparameters."""
        self.model = SimpleMLP(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            hidden_layers=self.hidden_layers,
            hidden_dim=self.hidden_dim,
            dropout=self.dropout,
            no_tasks=self.no_tasks,
        ).to(self.device)

        self.optimizer = Adam(self.model.parameters(), lr=self.lr, weight_decay=self.l2_weight)
        self._mse = nn.MSELoss()
        self._extras = {}

    def _batches(self, X, y, bs):
        """Yield consecutive mini-batches of size `bs`.

        NOTE(review): batches are taken in order with no shuffling between
        epochs — confirm this is intentional for the time-series setting.
        """
        n = X.shape[0]
        for i in range(0, n, bs):
            yield X[i:i + bs], y[i:i + bs]

    def _compute_loss(self, y_hat, y_true):
        """Training loss; under the NLL modes y_hat is treated as log-variance."""
        if self.loss_type == "mse":
            return self._mse(y_hat, y_true)

        # z = predicted log-variance, clamped for numerical stability.
        z = torch.clamp(y_hat, self.clamp_logvar_min, self.clamp_logvar_max)

        if self.loss_type == "gauss_nll_var":
            if self.target_is_logvar:
                v = torch.exp(y_true)                      
            else:
                v = y_true
            v = torch.clamp(v, min=self.nll_eps)
            # Gaussian NLL up to constants: v/exp(z) + z.
            loss = v * torch.exp(-z) + z                  
            return loss.mean()

        elif self.loss_type == "student_t_nll_var":
            if self.target_is_logvar:
                v = torch.exp(y_true)
            else:
                v = y_true
            v = torch.clamp(v, min=self.nll_eps)
            nu = torch.tensor(self.student_df, device=z.device, dtype=z.dtype)
            # Student-t NLL up to constants, with fixed df = student_df.
            loss = 0.5*(nu + 1.0) * torch.log1p(v / (nu * torch.exp(z))) + 0.5*z
            return loss.mean()

        else:
            raise ValueError(f"Unknown loss_type: {self.loss_type}")

    def fit(self, X_train, y_train, X_val=None, y_val=None):
        """Train with optional validation-based early stopping.

        If a validation set is given: the best-val-loss weights are tracked
        and restored at the end; training stops once `patience` epochs pass
        without `min_delta` improvement, but never before `min_epochs`.
        Returns self.
        """
        X_train = torch.as_tensor(X_train, dtype=torch.float32, device=self.device)
        y_train = torch.as_tensor(y_train, dtype=torch.float32, device=self.device)
        if X_val is not None:
            X_val = torch.as_tensor(X_val, dtype=torch.float32, device=self.device)
            y_val = torch.as_tensor(y_val, dtype=torch.float32, device=self.device)

        best_val = math.inf
        best_state = None
        epochs_no_improve = 0

        for epoch in range(1, self.epochs + 1):
            self.model.train()
            train_loss = 0.0
            nb = 0

            for xb, yb in self._batches(X_train, y_train, self.batch_size):
                self.optimizer.zero_grad(set_to_none=True)
                y_hat = self.model(xb)
                loss = self._compute_loss(y_hat, yb)
                loss.backward()
                self.optimizer.step()
                train_loss += float(loss.detach().cpu())
                nb += 1

            train_loss /= max(1, nb)

            if X_val is not None:
                self.model.eval()
                with torch.no_grad():
                    y_hat_v = self.model(X_val)
                    val_loss = float(self._compute_loss(y_hat_v, y_val).detach().cpu())

                if self.verbose:
                    print(f"Epoch {epoch}: Train {self.loss_type} = {train_loss:.4f} | "
                          f"Val {self.loss_type} = {val_loss:.4f}")

                # Snapshot weights (on CPU) whenever validation improves enough.
                improved = (best_val - val_loss) > self.min_delta
                if improved:
                    best_val = val_loss
                    epochs_no_improve = 0
                    best_state = {k: v.detach().cpu() for k, v in self.model.state_dict().items()}
                else:
                    epochs_no_improve += 1

                if (epoch >= self.min_epochs) and (epochs_no_improve >= self.patience):
                    if self.verbose:
                        print(f"Early stopping triggered at epoch {epoch}.")
                    break
            else:
                if self.verbose:
                    print(f"Epoch {epoch}: Train {self.loss_type} = {train_loss:.4f}")

        # Restore the best validation checkpoint if one was recorded.
        if best_state is not None:
            self.model.load_state_dict(best_state)

        return self

    def predict(self, X):
        """Return raw network outputs as a numpy array.

        Under the NLL loss modes these are log-variances, not variances.
        """
        self.model.eval()
        X = torch.as_tensor(X, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            return self.model(X).detach().cpu().numpy()

    def get_params(self, deep=True):
        """sklearn API: flat dict of all constructor hyperparameters."""
        return dict(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            dropout=self.dropout,
            lr=self.lr,
            epochs=self.epochs,
            batch_size=self.batch_size,
            device=self.device,
            verbose=self.verbose,
            hidden_layers=self.hidden_layers,
            no_tasks=self.no_tasks,
            l2_weight=self.l2_weight,
            patience=self.patience,
            min_epochs=self.min_epochs,
            min_delta=self.min_delta,
            hidden_dim=self.hidden_dim,
            loss_type=self.loss_type,
            target_is_logvar=self.target_is_logvar,
            nll_eps=self.nll_eps,
            clamp_logvar_min=self.clamp_logvar_min,
            clamp_logvar_max=self.clamp_logvar_max,
            student_df=self.student_df,
        )

    def set_params(self, **params):
        """sklearn API: update hyperparameters.

        Architecture/device changes trigger a full rebuild (discarding any
        trained weights); otherwise only the optimizer's lr / weight_decay
        are refreshed in place.
        """
        arch_keys = {
            "input_dim", "output_dim", "hidden_layers", "hidden_dim",
            "dropout", "no_tasks", "device"
        }
        need_rebuild = any(k in arch_keys for k in params)

        for k, v in params.items():
            if hasattr(self, k):
                setattr(self, k, v)

        if need_rebuild:
            self._build()
        else:
            for g in self.optimizer.param_groups:
                g["lr"] = self.lr
                g["weight_decay"] = self.l2_weight

        return self

Test 1 task EURUSD Vanilla MLP without CV¶

In [117]:
import os

# Destination for the pickled single-task EURUSD MLP.
save_test_SMLP_model_EURUSD_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_SMLP_model_1_task.pkl"
)

# Single train/val/test fit (no nested CV) of the vanilla MLP on the eur2
# tensors; train_and_evaluate_model is defined elsewhere in the notebook.
t1_EURUSD_SMLP_results, t1_EURUSD_SMLP_nested_results, t1_EURUSD_SMLP_best_model, t1_EURUSD_SMLP_best_params, _  = train_and_evaluate_model(
    model_type="Simple_MLP",
    X_price=eur2_X_price,
    X_time=eur2_X_time,
    y=eur2_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    save_model_path=save_test_SMLP_model_EURUSD_1_file_path,
    lr=1e-4,
    epochs=50,
    batch_size=64,
    verbose=True,
    time_horizon=30,
    dropout=0,
    l2_weight=1e-6,
    patience=10,
    min_epochs=90,
    hidden_dim=32,
    hidden_layers=3
    )
[mode=log_var_ratio] loss_type=mse, target_is_logvar=False, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 30
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -3.1812809750988515
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_var_ratio scaled):
Shape: (2723, 30, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.6622325934971935
  Min value:  -4.5885956391503315
Checking X_price_val:
Shape: (302, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.40892116517157
  Min value:  -2.633090998920803
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_var_ratio scaled):
Shape: (302, 30, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.461737368643011
  Min value:  -4.144793470690054
Checking X_price_test:
Shape: (757, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -2.633090998920803
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_var_ratio scaled):
Shape: (757, 30, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.7626499407155074
  Min value:  -4.740813716646197
Epoch 1: Train mse = 1.1009 | Val mse = 1.0928
Epoch 2: Train mse = 1.0430 | Val mse = 1.0542
Epoch 3: Train mse = 1.0071 | Val mse = 1.0202
Epoch 4: Train mse = 0.9798 | Val mse = 0.9923
Epoch 5: Train mse = 0.9588 | Val mse = 0.9696
Epoch 6: Train mse = 0.9419 | Val mse = 0.9486
Epoch 7: Train mse = 0.9269 | Val mse = 0.9258
Epoch 8: Train mse = 0.9124 | Val mse = 0.9030
Epoch 9: Train mse = 0.8967 | Val mse = 0.8735
Epoch 10: Train mse = 0.8822 | Val mse = 0.8593
Epoch 11: Train mse = 0.8666 | Val mse = 0.8250
Epoch 12: Train mse = 0.8532 | Val mse = 0.8324
Epoch 13: Train mse = 0.8389 | Val mse = 0.7840
Epoch 14: Train mse = 0.8210 | Val mse = 0.8002
Epoch 15: Train mse = 0.8091 | Val mse = 0.7487
Epoch 16: Train mse = 0.7933 | Val mse = 0.7750
Epoch 17: Train mse = 0.7853 | Val mse = 0.7197
Epoch 18: Train mse = 0.7691 | Val mse = 0.7584
Epoch 19: Train mse = 0.7572 | Val mse = 0.6871
Epoch 20: Train mse = 0.7338 | Val mse = 0.7018
Epoch 21: Train mse = 0.7215 | Val mse = 0.6619
Epoch 22: Train mse = 0.7060 | Val mse = 0.6727
Epoch 23: Train mse = 0.6922 | Val mse = 0.6452
Epoch 24: Train mse = 0.6774 | Val mse = 0.6439
Epoch 25: Train mse = 0.6651 | Val mse = 0.6378
Epoch 26: Train mse = 0.6544 | Val mse = 0.6195
Epoch 27: Train mse = 0.6393 | Val mse = 0.6150
Epoch 28: Train mse = 0.6257 | Val mse = 0.6195
Epoch 29: Train mse = 0.6172 | Val mse = 0.5998
Epoch 30: Train mse = 0.6042 | Val mse = 0.5991
Epoch 31: Train mse = 0.5898 | Val mse = 0.5764
Epoch 32: Train mse = 0.5771 | Val mse = 0.5550
Epoch 33: Train mse = 0.5694 | Val mse = 0.5465
Epoch 34: Train mse = 0.5615 | Val mse = 0.5594
Epoch 35: Train mse = 0.5460 | Val mse = 0.5335
Epoch 36: Train mse = 0.5388 | Val mse = 0.5931
Epoch 37: Train mse = 0.5487 | Val mse = 0.5139
Epoch 38: Train mse = 0.5199 | Val mse = 0.5494
Epoch 39: Train mse = 0.5128 | Val mse = 0.4941
Epoch 40: Train mse = 0.5014 | Val mse = 0.5370
Epoch 41: Train mse = 0.4916 | Val mse = 0.4822
Epoch 42: Train mse = 0.4782 | Val mse = 0.5232
Epoch 43: Train mse = 0.4726 | Val mse = 0.4816
Epoch 44: Train mse = 0.4611 | Val mse = 0.4796
Epoch 45: Train mse = 0.4513 | Val mse = 0.5156
Epoch 46: Train mse = 0.4541 | Val mse = 0.4462
Epoch 47: Train mse = 0.4412 | Val mse = 0.4921
Epoch 48: Train mse = 0.4324 | Val mse = 0.4671
Epoch 49: Train mse = 0.4329 | Val mse = 0.4328
Epoch 50: Train mse = 0.4546 | Val mse = 0.5877

Parameters used in the single-fit model:
input_dim: 360
output_dim: 30
dropout: 0.00000000
lr: 0.00010000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 3
no_tasks: 1
l2_weight: 0.00000100
patience: 10
min_epochs: 90
min_delta: 0.00010000
hidden_dim: 32
loss_type: mse
target_is_logvar: False
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.13181230
1 day(s) RMSE                      : 0.22407982
1 day(s) R2                        : -0.10008904
1 day(s) Pearson r                 : 0.33309319
1 day(s) QLIKE                     : 0.57519066
3 day(s) MAE                       : 0.13826405
3 day(s) RMSE                      : 0.23401201
3 day(s) R2                        : -0.20902772
3 day(s) Pearson r                 : 0.29530543
3 day(s) QLIKE                     : 0.59422919
5 day(s) MAE                       : 0.13404594
5 day(s) RMSE                      : 0.22901049
5 day(s) R2                        : -0.17105693
5 day(s) Pearson r                 : 0.31226600
5 day(s) QLIKE                     : 0.58462768
10 day(s) MAE                      : 0.13612855
10 day(s) RMSE                     : 0.23167728
10 day(s) R2                       : -0.20992958
10 day(s) Pearson r                : 0.29192773
10 day(s) QLIKE                    : 0.61735417
20 day(s) MAE                      : 0.13834597
20 day(s) RMSE                     : 0.23553788
20 day(s) R2                       : -0.26533896
20 day(s) Pearson r                : 0.25850175
20 day(s) QLIKE                    : 0.63388486
full horizon MAE                   : 0.13999686
full horizon RMSE                  : 0.23741028
full horizon R2                    : -0.29367034
full horizon Pearson r             : 0.24743317
full horizon QLIKE                 : 0.65184732

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/test_EURUSD_SMLP_model_1_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00270254, max=1.47617

Test 1 task EURUSD 3 Vanilla MLP without CV¶

In [ ]:
import os

# Where the fitted model for this experiment gets pickled.
save_test_SMLP_model_EURUSD_1_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SMLP_model_1_task.pkl"
)

# Single-task Simple MLP on the eur4 feature set, plain train/val/test split
# (no nested CV, no single holdout). Target: log variance ratio against a
# 2-step baseline window taken from feature index 0.
_t1_eurusd_smlp_config = dict(
    model_type="Simple_MLP",
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_SMLP_model_EURUSD_1_file_path,
    lr=1e-4,
    epochs=50,
    batch_size=4,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=30,
    hidden_dim=128,
    hidden_layers=2,
    min_delta=1e-4,
    single_holdout=False,
    target_mode="log_var_ratio",
    baseline_feature_idx=0,
    baseline_window=2,
)

(
    t1_EURUSD_SMLP_results,
    t1_EURUSD_SMLP_nested_results,
    t1_EURUSD_SMLP_best_model,
    t1_EURUSD_SMLP_best_params,
    yy,
) = train_and_evaluate_model(**_t1_eurusd_smlp_config)

Test EURUSD 5¶

In [ ]:
import os

# Where the fitted model for this experiment gets pickled.
save_test_SMLP_model_EURUSD_5_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SMLP_model_5_task.pkl"
)

# Single-task Simple MLP on the eur5 feature set (time features left
# unnormalized here). Target: log MSE vs. a 20-step baseline window on
# feature index 0.
_t5_eurusd_smlp_config = dict(
    model_type="Simple_MLP",
    X_price=eur5_X_price,
    X_time=eur5_X_time,
    y=eur5_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_SMLP_model_EURUSD_5_file_path,
    lr=5e-4,
    epochs=50,
    batch_size=64,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=30,
    hidden_dim=512,
    hidden_layers=3,
    min_delta=1e-4,
    single_holdout=False,
    target_mode="log_mse",
    baseline_feature_idx=0,
    baseline_window=20,
)

(
    t5_EURUSD_SMLP_results,
    t5_EURUSD_SMLP_nested_results,
    t5_EURUSD_SMLP_best_model,
    t5_EURUSD_SMLP_best_params,
    yy,
) = train_and_evaluate_model(**_t5_eurusd_smlp_config)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.40793072075585246
  Min value:  0.00022460106458896233
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4442211143925813
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755

Size of the X data for training (core): (2723, 60)
Size of the Y data for training (core): (2723, 20)
Epoch 1: Train mse = 1.1085 | Val mse = 1.1654
Epoch 2: Train mse = 0.8407 | Val mse = 1.1265
Epoch 3: Train mse = 0.6316 | Val mse = 0.7571
Epoch 4: Train mse = 0.5505 | Val mse = 0.8073
Epoch 5: Train mse = 0.5419 | Val mse = 0.8201
Epoch 6: Train mse = 0.4995 | Val mse = 0.6648
Epoch 7: Train mse = 0.4223 | Val mse = 0.5509
Epoch 8: Train mse = 0.3904 | Val mse = 0.4808
Epoch 9: Train mse = 0.3724 | Val mse = 0.4628
Epoch 10: Train mse = 0.3573 | Val mse = 0.4462
Epoch 11: Train mse = 0.3450 | Val mse = 0.4241
Epoch 12: Train mse = 0.3405 | Val mse = 0.4235
Epoch 13: Train mse = 0.3347 | Val mse = 0.4174
Epoch 14: Train mse = 0.3245 | Val mse = 0.3961
Epoch 15: Train mse = 0.3183 | Val mse = 0.4418
Epoch 16: Train mse = 0.3332 | Val mse = 0.4608
Epoch 17: Train mse = 0.3381 | Val mse = 0.4467
Epoch 18: Train mse = 0.3329 | Val mse = 0.4721
Epoch 19: Train mse = 0.3308 | Val mse = 0.4239
Epoch 20: Train mse = 0.3217 | Val mse = 0.4788
Epoch 21: Train mse = 0.3211 | Val mse = 0.3851
Epoch 22: Train mse = 0.3074 | Val mse = 0.4192
Epoch 23: Train mse = 0.3066 | Val mse = 0.4409
Epoch 24: Train mse = 0.2972 | Val mse = 0.3917
Epoch 25: Train mse = 0.2893 | Val mse = 0.4470
Epoch 26: Train mse = 0.2938 | Val mse = 0.3526
Epoch 27: Train mse = 0.2867 | Val mse = 0.4251
Epoch 28: Train mse = 0.2967 | Val mse = 0.3474
Epoch 29: Train mse = 0.2943 | Val mse = 0.4090
Epoch 30: Train mse = 0.2970 | Val mse = 0.3967
Epoch 31: Train mse = 0.2788 | Val mse = 0.3907
Epoch 32: Train mse = 0.2721 | Val mse = 0.3829
Epoch 33: Train mse = 0.2623 | Val mse = 0.3965
Epoch 34: Train mse = 0.2578 | Val mse = 0.4017
Epoch 35: Train mse = 0.2617 | Val mse = 0.3679
Epoch 36: Train mse = 0.2700 | Val mse = 0.3643
Epoch 37: Train mse = 0.2832 | Val mse = 0.4121
Epoch 38: Train mse = 0.2714 | Val mse = 0.3745
Early stopping triggered at epoch 38.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
dropout: 0.00000000
lr: 0.00050000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 3
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 30
min_delta: 0.00010000
hidden_dim: 512
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.09464960
1 day(s) RMSE                      : 0.16672542
1 day(s) R2                        : 0.39098781
1 day(s) Pearson r                 : 0.64844177
1 day(s) QLIKE                     : 0.45368584
3 day(s) MAE                       : 0.09935203
3 day(s) RMSE                      : 0.17911570
3 day(s) R2                        : 0.29168359
3 day(s) Pearson r                 : 0.58907491
3 day(s) QLIKE                     : 0.46721591
5 day(s) MAE                       : 0.09917879
5 day(s) RMSE                      : 0.18545364
5 day(s) R2                        : 0.23204108
5 day(s) Pearson r                 : 0.54123295
5 day(s) QLIKE                     : 0.46941656
10 day(s) MAE                      : 0.09952519
10 day(s) RMSE                     : 0.19201849
10 day(s) R2                       : 0.16885026
10 day(s) Pearson r                : 0.47829144
10 day(s) QLIKE                    : 0.49290335
20 day(s) MAE                      : 0.09735010
20 day(s) RMSE                     : 0.19308096
20 day(s) R2                       : 0.14971558
20 day(s) Pearson r                : 0.43720653
20 day(s) QLIKE                    : 0.50702214
full horizon MAE                   : 0.09735010
full horizon RMSE                  : 0.19308096
full horizon R2                    : 0.14971558
full horizon Pearson r             : 0.43720653
full horizon QLIKE                 : 0.50702214

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00419776, max=1.6561

Test EURUSD 6¶

In [ ]:
import os

# Where the fitted model for this experiment gets pickled.
save_test_SMLP_model_EURUSD_6_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SMLP_model_6_task.pkl"
)

# Single-task Simple MLP on the wider eur6 feature set (8 price features).
# Target: log variance ratio against a 2-step baseline window on feature 0.
_t6_eurusd_smlp_config = dict(
    model_type="Simple_MLP",
    X_price=eur6_X_price,
    X_time=eur6_X_time,
    y=eur6_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_SMLP_model_EURUSD_6_file_path,
    lr=5e-4,
    epochs=50,
    batch_size=32,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=30,
    hidden_dim=64,
    hidden_layers=3,
    min_delta=1e-4,
    single_holdout=False,
    target_mode="log_var_ratio",
    baseline_feature_idx=0,
    baseline_window=2,
)

(
    t6_EURUSD_SMLP_results,
    t6_EURUSD_SMLP_nested_results,
    t6_EURUSD_SMLP_best_model,
    t6_EURUSD_SMLP_best_params,
    yy,
) = train_and_evaluate_model(**_t6_eurusd_smlp_config)
[mode=log_var_ratio] loss_type=mse, target_is_logvar=False, normalize_y=True
Using device: mps
Batch size for y: 3781
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2722, 60, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0000000000000002
  Min value:  0.0
Checking X_time_train_core:
Shape: (2722, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking y_train_core (log_var_ratio scaled):
Shape: (2722, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.047438323525787
  Min value:  -5.29816439091353
Checking X_price_val:
Shape: (302, 60, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.1249999999999998
  Min value:  0.0
Checking y_val (log_var_ratio scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8046793379658457
  Min value:  -4.56473299178836
Checking X_price_test:
Shape: (757, 60, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4141668678880164
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4999999999999996
  Min value:  0.0
Checking y_test (log_var_ratio scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.109080553652343
  Min value:  -5.056254299402706
Epoch 1: Train mse = 1.0254 | Val mse = 1.1843
Epoch 2: Train mse = 0.9898 | Val mse = 1.1796
Epoch 3: Train mse = 0.9693 | Val mse = 1.1996
Epoch 4: Train mse = 0.9482 | Val mse = 1.1369
Epoch 5: Train mse = 0.9325 | Val mse = 0.9834
Epoch 6: Train mse = 0.9414 | Val mse = 1.0421
Epoch 7: Train mse = 0.9236 | Val mse = 0.8652
Epoch 8: Train mse = 0.9231 | Val mse = 1.0115
Epoch 9: Train mse = 0.8766 | Val mse = 0.8263
Epoch 10: Train mse = 0.8985 | Val mse = 1.0142
Epoch 11: Train mse = 0.8885 | Val mse = 0.8011
Epoch 12: Train mse = 0.8513 | Val mse = 0.7981
Epoch 13: Train mse = 0.8393 | Val mse = 1.1614
Epoch 14: Train mse = 0.8228 | Val mse = 0.8322
Epoch 15: Train mse = 0.8356 | Val mse = 1.5081
Epoch 16: Train mse = 0.9409 | Val mse = 0.8507
Epoch 17: Train mse = 0.8573 | Val mse = 0.8267
Epoch 18: Train mse = 0.8133 | Val mse = 0.9469
Epoch 19: Train mse = 0.7925 | Val mse = 0.8443
Epoch 20: Train mse = 0.7744 | Val mse = 0.7599
Epoch 21: Train mse = 0.7423 | Val mse = 0.8201
Epoch 22: Train mse = 0.7565 | Val mse = 0.8120
Epoch 23: Train mse = 0.7031 | Val mse = 0.6744
Epoch 24: Train mse = 0.7012 | Val mse = 0.8145
Epoch 25: Train mse = 0.7354 | Val mse = 0.8004
Epoch 26: Train mse = 0.7427 | Val mse = 0.9090
Epoch 27: Train mse = 0.6923 | Val mse = 0.7742
Epoch 28: Train mse = 0.6434 | Val mse = 0.6592
Epoch 29: Train mse = 0.6639 | Val mse = 0.6840
Epoch 30: Train mse = 0.6599 | Val mse = 0.9014
Epoch 31: Train mse = 0.6814 | Val mse = 0.7118
Epoch 32: Train mse = 0.6656 | Val mse = 0.8019
Epoch 33: Train mse = 0.7925 | Val mse = 0.7838
Epoch 34: Train mse = 0.7089 | Val mse = 0.7336
Epoch 35: Train mse = 0.6505 | Val mse = 0.6627
Epoch 36: Train mse = 0.6276 | Val mse = 0.6601
Epoch 37: Train mse = 0.6359 | Val mse = 0.6143
Epoch 38: Train mse = 0.6031 | Val mse = 0.7480
Epoch 39: Train mse = 0.6087 | Val mse = 0.5434
Epoch 40: Train mse = 0.6665 | Val mse = 0.7992
Epoch 41: Train mse = 0.6333 | Val mse = 0.6767
Epoch 42: Train mse = 0.6035 | Val mse = 0.5965
Epoch 43: Train mse = 0.5626 | Val mse = 0.5183
Epoch 44: Train mse = 0.5821 | Val mse = 0.5816
Epoch 45: Train mse = 0.6578 | Val mse = 0.5533
Epoch 46: Train mse = 0.5494 | Val mse = 0.8119
Epoch 47: Train mse = 0.6156 | Val mse = 0.5541
Epoch 48: Train mse = 0.6141 | Val mse = 0.5380
Epoch 49: Train mse = 0.6093 | Val mse = 0.6393
Epoch 50: Train mse = 0.5170 | Val mse = 0.5141

Parameters used in the single-fit model:
input_dim: 480
output_dim: 20
dropout: 0.00000000
lr: 0.00050000
epochs: 50
batch_size: 32
device: mps
verbose: True
hidden_layers: 3
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 30
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: False
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.28022477
1 day(s) RMSE                      : 0.64311327
1 day(s) R2                        : -8.06144123
1 day(s) Pearson r                 : 0.23981665
1 day(s) QLIKE                     : 0.70240040
3 day(s) MAE                       : 0.28926402
3 day(s) RMSE                      : 0.71138957
3 day(s) R2                        : -10.17313269
3 day(s) Pearson r                 : 0.16843885
3 day(s) QLIKE                     : 0.75433149
5 day(s) MAE                       : 0.26941983
5 day(s) RMSE                      : 0.65337632
5 day(s) R2                        : -8.53221966
5 day(s) Pearson r                 : 0.16472426
5 day(s) QLIKE                     : 0.83573974
10 day(s) MAE                      : 0.28015105
10 day(s) RMSE                     : 0.68665716
10 day(s) R2                       : -9.62852744
10 day(s) Pearson r                : 0.12698617
10 day(s) QLIKE                    : 0.86454987
20 day(s) MAE                      : 0.30356013
20 day(s) RMSE                     : 0.76471899
20 day(s) R2                       : -12.33795021
20 day(s) Pearson r                : 0.09339941
20 day(s) QLIKE                    : 0.93895486
full horizon MAE                   : 0.30356013
full horizon RMSE                  : 0.76471899
full horizon R2                    : -12.33795021
full horizon Pearson r             : 0.09339941
full horizon QLIKE                 : 0.93895486

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_SMLP_model_6_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00190013, max=19.7457

Test EURUSD 7¶

In [ ]:
import os

# Where the fitted model for this experiment gets pickled.
save_test_SMLP_model_EURUSD_7_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SMLP_model_7_task.pkl"
)

# Single-task Simple MLP on the eur7 feature set with a much larger network
# (6 hidden layers, width 1024, lighter L2). Target: log MSE vs. a 20-step
# baseline window on feature index 0.
_t7_eurusd_smlp_config = dict(
    model_type="Simple_MLP",
    X_price=eur7_X_price,
    X_time=eur7_X_time,
    y=eur7_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_SMLP_model_EURUSD_7_file_path,
    lr=1e-3,
    epochs=50,
    batch_size=32,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-5,
    patience=10,
    min_epochs=30,
    hidden_dim=1024,
    hidden_layers=6,
    min_delta=1e-4,
    single_holdout=False,
    target_mode="log_mse",
    baseline_feature_idx=0,
    baseline_window=20,
)

(
    t7_EURUSD_SMLP_results,
    t7_EURUSD_SMLP_nested_results,
    t7_EURUSD_SMLP_best_model,
    t7_EURUSD_SMLP_best_params,
    yy,
) = train_and_evaluate_model(**_t7_eurusd_smlp_config)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0000000000000002
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7917474776.440305
  Min value:  -0.007013492710980488
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.1249999999999998
  Min value:  0.0
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4141668678880164
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4999999999999996
  Min value:  0.0
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755
Epoch 1: Train mse = 1.1017 | Val mse = 2.1282
Epoch 2: Train mse = 1.1071 | Val mse = 1.3940
Epoch 3: Train mse = 1.0810 | Val mse = 1.3169
Epoch 4: Train mse = 1.0838 | Val mse = 1.2321
Epoch 5: Train mse = 1.0787 | Val mse = 1.1729
Epoch 6: Train mse = 1.0663 | Val mse = 1.2682
Epoch 7: Train mse = 1.0365 | Val mse = 1.4274
Epoch 8: Train mse = 1.0295 | Val mse = 1.4302
Epoch 9: Train mse = 1.0287 | Val mse = 1.4285
Epoch 10: Train mse = 1.0230 | Val mse = 1.4716
Epoch 11: Train mse = 1.0216 | Val mse = 1.4979
Epoch 12: Train mse = 1.0204 | Val mse = 1.5198
Epoch 13: Train mse = 1.0189 | Val mse = 1.5163
Epoch 14: Train mse = 1.0197 | Val mse = 1.5220
Epoch 15: Train mse = 1.0178 | Val mse = 1.5161
Epoch 16: Train mse = 1.0188 | Val mse = 1.5152
Epoch 17: Train mse = 1.0179 | Val mse = 1.5126
Epoch 18: Train mse = 1.0180 | Val mse = 1.5109
Epoch 19: Train mse = 1.0180 | Val mse = 1.5110
Epoch 20: Train mse = 1.0174 | Val mse = 1.5089
Epoch 21: Train mse = 1.0178 | Val mse = 1.5112
Epoch 22: Train mse = 1.0169 | Val mse = 1.5070
Epoch 23: Train mse = 1.0179 | Val mse = 1.5109
Epoch 24: Train mse = 1.0161 | Val mse = 1.5062
Epoch 25: Train mse = 1.0175 | Val mse = 1.5118
Epoch 26: Train mse = 1.0158 | Val mse = 1.5072
Epoch 27: Train mse = 1.0171 | Val mse = 1.5120
Epoch 28: Train mse = 1.0157 | Val mse = 1.5085
Epoch 29: Train mse = 1.0169 | Val mse = 1.5132
Epoch 30: Train mse = 1.0156 | Val mse = 1.5082
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 32
device: mps
verbose: True
hidden_layers: 6
no_tasks: 1
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
hidden_dim: 1024
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.12405876
1 day(s) RMSE                      : 0.22466625
1 day(s) R2                        : -0.10585455
1 day(s) Pearson r                 : -0.23082149
1 day(s) QLIKE                     : 0.58506520
3 day(s) MAE                       : 0.12372473
3 day(s) RMSE                      : 0.22406257
3 day(s) R2                        : -0.10840534
3 day(s) Pearson r                 : -0.00037619
3 day(s) QLIKE                     : 0.58378936
5 day(s) MAE                       : 0.12295376
5 day(s) RMSE                      : 0.22247751
5 day(s) R2                        : -0.10519648
5 day(s) Pearson r                 : -0.00265316
5 day(s) QLIKE                     : 0.58168654
10 day(s) MAE                      : 0.12222232
10 day(s) RMSE                     : 0.22107074
10 day(s) R2                       : -0.10168063
10 day(s) Pearson r                : -0.00250193
10 day(s) QLIKE                    : 0.57942102
20 day(s) MAE                      : 0.12154976
20 day(s) RMSE                     : 0.21998599
20 day(s) R2                       : -0.10376191
20 day(s) Pearson r                : 0.00125410
20 day(s) QLIKE                    : 0.57495934
full horizon MAE                   : 0.12154976
full horizon RMSE                  : 0.21998599
full horizon R2                    : -0.10376191
full horizon Pearson r             : 0.00125410
full horizon QLIKE                 : 0.57495934

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_SMLP_model_7_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.115914, max=0.126551

Test 8¶

In [576]:
import os

# Where the fitted model for this experiment gets pickled.
save_test_SMLP_model_EURUSD_8_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SMLP_model_8_task.pkl"
)

# Single-task Simple MLP on the eur8 feature set, longer budget (100 epochs).
# Target: log MSE vs. a 2-step baseline window; baseline_feature_idx should
# point at the feature that carries the returns.
_t8_eurusd_smlp_config = dict(
    model_type="Simple_MLP",
    X_price=eur8_X_price,
    X_time=eur8_X_time,
    y=eur8_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_SMLP_model_EURUSD_8_file_path,
    lr=5e-4,
    epochs=100,
    batch_size=64,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=30,
    hidden_dim=64,
    hidden_layers=3,
    min_delta=1e-4,
    single_holdout=False,
    target_mode="log_mse",
    baseline_feature_idx=0,
    baseline_window=2,
)

(
    t8_EURUSD_SMLP_results,
    t8_EURUSD_SMLP_nested_results,
    t8_EURUSD_SMLP_best_model,
    t8_EURUSD_SMLP_best_params,
    yy,
) = train_and_evaluate_model(**_t8_eurusd_smlp_config)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755
Epoch 1: Train mse = 0.9477 | Val mse = 0.9289
Epoch 2: Train mse = 0.6873 | Val mse = 0.7190
Epoch 3: Train mse = 0.5437 | Val mse = 0.5929
Epoch 4: Train mse = 0.4629 | Val mse = 0.5233
Epoch 5: Train mse = 0.4166 | Val mse = 0.4886
Epoch 6: Train mse = 0.3930 | Val mse = 0.4673
Epoch 7: Train mse = 0.3774 | Val mse = 0.4519
Epoch 8: Train mse = 0.3649 | Val mse = 0.4401
Epoch 9: Train mse = 0.3538 | Val mse = 0.4308
Epoch 10: Train mse = 0.3435 | Val mse = 0.4238
Epoch 11: Train mse = 0.3344 | Val mse = 0.4185
Epoch 12: Train mse = 0.3268 | Val mse = 0.4142
Epoch 13: Train mse = 0.3202 | Val mse = 0.4105
Epoch 14: Train mse = 0.3142 | Val mse = 0.4072
Epoch 15: Train mse = 0.3089 | Val mse = 0.4044
Epoch 16: Train mse = 0.3040 | Val mse = 0.4021
Epoch 17: Train mse = 0.2997 | Val mse = 0.4003
Epoch 18: Train mse = 0.2957 | Val mse = 0.3987
Epoch 19: Train mse = 0.2920 | Val mse = 0.3973
Epoch 20: Train mse = 0.2885 | Val mse = 0.3962
Epoch 21: Train mse = 0.2852 | Val mse = 0.3953
Epoch 22: Train mse = 0.2821 | Val mse = 0.3946
Epoch 23: Train mse = 0.2791 | Val mse = 0.3941
Epoch 24: Train mse = 0.2762 | Val mse = 0.3939
Epoch 25: Train mse = 0.2734 | Val mse = 0.3939
Epoch 26: Train mse = 0.2707 | Val mse = 0.3940
Epoch 27: Train mse = 0.2681 | Val mse = 0.3943
Epoch 28: Train mse = 0.2656 | Val mse = 0.3947
Epoch 29: Train mse = 0.2631 | Val mse = 0.3953
Epoch 30: Train mse = 0.2607 | Val mse = 0.3960
Epoch 31: Train mse = 0.2583 | Val mse = 0.3968
Epoch 32: Train mse = 0.2560 | Val mse = 0.3977
Epoch 33: Train mse = 0.2537 | Val mse = 0.3986
Epoch 34: Train mse = 0.2515 | Val mse = 0.3994
Early stopping triggered at epoch 34.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
dropout: 0.00000000
lr: 0.00050000
epochs: 100
batch_size: 64
device: mps
verbose: True
hidden_layers: 3
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 30
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08745844
1 day(s) RMSE                      : 0.17746631
1 day(s) R2                        : 0.30999192
1 day(s) Pearson r                 : 0.57966146
1 day(s) QLIKE                     : 0.43444844
3 day(s) MAE                       : 0.09123118
3 day(s) RMSE                      : 0.18515062
3 day(s) R2                        : 0.24314910
3 day(s) Pearson r                 : 0.52439783
3 day(s) QLIKE                     : 0.46276500
5 day(s) MAE                       : 0.09204460
5 day(s) RMSE                      : 0.18804839
5 day(s) R2                        : 0.21040116
5 day(s) Pearson r                 : 0.49463939
5 day(s) QLIKE                     : 0.46525915
10 day(s) MAE                      : 0.09347147
10 day(s) RMSE                     : 0.19281354
10 day(s) R2                       : 0.16195323
10 day(s) Pearson r                : 0.44891100
10 day(s) QLIKE                    : 0.47208852
20 day(s) MAE                      : 0.09463232
20 day(s) RMSE                     : 0.19519930
20 day(s) R2                       : 0.13095581
20 day(s) Pearson r                : 0.41578511
20 day(s) QLIKE                    : 0.48778836
full horizon MAE                   : 0.09463232
full horizon RMSE                  : 0.19519930
full horizon R2                    : 0.13095581
full horizon Pearson r             : 0.41578511
full horizon QLIKE                 : 0.48778836

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_SMLP_model_8_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00411695, max=1.19432

Check y¶

In [507]:
# Quick sanity check on the first forecast step of `yy`:
# column 0 holds the ground truth, column 1 the model prediction.
# NOTE(review): assumes yy is (samples, horizon, 2) — confirm upstream.
y_true_t0 = yy[:, 0, 0]
y_pred_t0 = yy[:, 0, 1]

for label, series in (("y_true", y_true_t0), ("y_pred", y_pred_t0)):
    print(f"{label} @t=0  std={np.nanstd(series):.6g}, max={np.nanmax(series):.6g}, min={np.nanmin(series):.6g}")
y_true @t=0  std=0.213643, max=2.66457, min=9.56189e-05
y_pred @t=0  std=0.520235, max=4.87784, min=0.00883726

Vanilla MLP 1 task different tests¶

In [ ]:
import os
import numpy as np

# Where the fitted model for this experiment gets pickled.
save_test_SMLP_model_AAPL_1_file_path = os.path.join(
    root_folder, objects_relative_path, "test_AAPL_SMLP_model_1_task.pkl"
)

# Single-task Simple MLP on the AAPL appl8 feature set, shorter horizon (5)
# and a small network. Target: log MSE vs. a 60-step baseline window on
# feature index 0.
_t1_aapl_smlp_config = dict(
    model_type="Simple_MLP",
    X_price=appl8_X_price,
    X_time=appl8_X_time,
    y=appl8_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_SMLP_model_AAPL_1_file_path,
    lr=5e-5,
    epochs=50,
    batch_size=4,
    verbose=True,
    time_horizon=5,
    dropout=0.0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=20,
    hidden_dim=32,
    hidden_layers=2,
    min_delta=1e-4,
    target_mode="log_mse",
    baseline_feature_idx=0,
    baseline_window=60,
)

(
    t1_AAPL_SMLP_results,
    t1_AAPL_SMLP_nested_results,
    t1_AAPL_SMLP_best_model,
    t1_AAPL_SMLP_best_params,
    yy,
) = train_and_evaluate_model(**_t1_aapl_smlp_config)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.140231129276094
  Min value:  -2.358314580069225
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.907938252663739
  Min value:  -1.632926464574096
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.896926346992255
  Min value:  -2.123830418076347
Epoch 1: Train mse = 1.0106 | Val mse = 0.6708
Epoch 2: Train mse = 1.0124 | Val mse = 0.6782
Epoch 3: Train mse = 1.0021 | Val mse = 0.6762
Epoch 4: Train mse = 0.9895 | Val mse = 0.6797
Epoch 5: Train mse = 0.9760 | Val mse = 0.6904
Epoch 6: Train mse = 0.9610 | Val mse = 0.7086
Epoch 7: Train mse = 0.9441 | Val mse = 0.7328
Epoch 8: Train mse = 0.9248 | Val mse = 0.7592
Epoch 9: Train mse = 0.9026 | Val mse = 0.7834
Epoch 10: Train mse = 0.8778 | Val mse = 0.8010
Epoch 11: Train mse = 0.8516 | Val mse = 0.8064
Epoch 12: Train mse = 0.8258 | Val mse = 0.7960
Epoch 13: Train mse = 0.8018 | Val mse = 0.7726
Epoch 14: Train mse = 0.7806 | Val mse = 0.7432
Epoch 15: Train mse = 0.7623 | Val mse = 0.7138
Epoch 16: Train mse = 0.7468 | Val mse = 0.6883
Epoch 17: Train mse = 0.7337 | Val mse = 0.6680
Epoch 18: Train mse = 0.7228 | Val mse = 0.6529
Epoch 19: Train mse = 0.7135 | Val mse = 0.6421
Epoch 20: Train mse = 0.7058 | Val mse = 0.6347
Epoch 21: Train mse = 0.6992 | Val mse = 0.6294
Epoch 22: Train mse = 0.6935 | Val mse = 0.6254
Epoch 23: Train mse = 0.6886 | Val mse = 0.6222
Epoch 24: Train mse = 0.6843 | Val mse = 0.6192
Epoch 25: Train mse = 0.6805 | Val mse = 0.6163
Epoch 26: Train mse = 0.6770 | Val mse = 0.6135
Epoch 27: Train mse = 0.6739 | Val mse = 0.6106
Epoch 28: Train mse = 0.6710 | Val mse = 0.6078
Epoch 29: Train mse = 0.6683 | Val mse = 0.6050
Epoch 30: Train mse = 0.6657 | Val mse = 0.6021
Epoch 31: Train mse = 0.6633 | Val mse = 0.5994
Epoch 32: Train mse = 0.6610 | Val mse = 0.5966
Epoch 33: Train mse = 0.6589 | Val mse = 0.5940
Epoch 34: Train mse = 0.6568 | Val mse = 0.5914
Epoch 35: Train mse = 0.6548 | Val mse = 0.5889
Epoch 36: Train mse = 0.6528 | Val mse = 0.5864
Epoch 37: Train mse = 0.6509 | Val mse = 0.5840
Epoch 38: Train mse = 0.6491 | Val mse = 0.5817
Epoch 39: Train mse = 0.6473 | Val mse = 0.5795
Epoch 40: Train mse = 0.6456 | Val mse = 0.5773
Epoch 41: Train mse = 0.6439 | Val mse = 0.5752
Epoch 42: Train mse = 0.6423 | Val mse = 0.5731
Epoch 43: Train mse = 0.6407 | Val mse = 0.5711
Epoch 44: Train mse = 0.6392 | Val mse = 0.5691
Epoch 45: Train mse = 0.6377 | Val mse = 0.5671
Epoch 46: Train mse = 0.6363 | Val mse = 0.5652
Epoch 47: Train mse = 0.6348 | Val mse = 0.5633
Epoch 48: Train mse = 0.6335 | Val mse = 0.5614
Epoch 49: Train mse = 0.6321 | Val mse = 0.5596
Epoch 50: Train mse = 0.6308 | Val mse = 0.5577

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
dropout: 0.00000000
lr: 0.00005000
epochs: 50
batch_size: 4
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 20
min_delta: 0.00010000
hidden_dim: 32
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.58473997
1 day(s) RMSE                      : 8.00624713
1 day(s) R2                        : 0.14183473
1 day(s) Pearson r                 : 0.45541136
1 day(s) QLIKE                     : 0.32111413
3 day(s) MAE                       : 2.64977552
3 day(s) RMSE                      : 8.35105197
3 day(s) R2                        : 0.11752366
3 day(s) Pearson r                 : 0.40812205
3 day(s) QLIKE                     : 0.36502756
5 day(s) MAE                       : 2.71750510
5 day(s) RMSE                      : 8.53601870
5 day(s) R2                        : 0.08817267
5 day(s) Pearson r                 : 0.33117256
5 day(s) QLIKE                     : 0.40021435
full horizon MAE                   : 2.71750510
full horizon RMSE                  : 8.53601870
full horizon R2                    : 0.08817267
full horizon Pearson r             : 0.33117256
full horizon QLIKE                 : 0.40021435

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_AAPL_SMLP_model_1_task.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.45567, max=14.5826

Check output y¶

In [560]:
# Sanity-check the spread of the saved true vs. predicted series at t=0.
y_true_t0 = yy[..., 0, 0]
y_pred_t0 = yy[..., 0, 1]

stats = {"y_true": y_true_t0, "y_pred": y_pred_t0}
for name, arr in stats.items():
    print(f"{name} @t=0  std={np.nanstd(arr):.6g}, max={np.nanmax(arr):.6g}, min={np.nanmin(arr):.6g}")
y_true @t=0  std=8.64258, max=112.408, min=0.314451
y_pred @t=0  std=2.35492, max=19.8502, min=1.52246

Test 1 task with Vanilla MLP with CV¶

In [142]:
import os

# File paths: one for the mid-run checkpoint, one for the final nested-CV model.
save_checkpoint_SMLP_model_1_CV_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "checkpoint_SMLP_model_1_CV.pkl",
)
save_test_SMLP_model_1_CV_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_SMLP_model_1_CV.pkl",
)

# Grid searched inside each inner fold; grid values override any overlapping
# kwargs passed directly to train_and_evaluate_model (the function warns about this).
param_grid_test_SMLP_1_CV_aapl_cv = {
    "dropout": [0.2],
    "lr": [3e-5],
    "batch_size": [128, 256],
    "epochs": [3],
    "resume": [True],
    "verbose": [True],
}

smlp_1task_cv_kwargs = dict(
    model_type="Simple_MLP",
    # data
    X_price=aapl_X_price,
    X_time=aapl_X_time,
    y=aapl_y,
    no_tasks=1,
    # preprocessing
    flatten=True,
    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    # nested-CV setup
    use_nested_cv=True,
    outer_folds=2,
    inner_folds=2,
    param_grid=param_grid_test_SMLP_1_CV_aapl_cv,
    checkpoint_path=save_checkpoint_SMLP_model_1_CV_file_path,
    save_model_path=save_test_SMLP_model_1_CV_file_path,
    # defaults (grid entries take precedence where they overlap)
    lr=3e-6,
    epochs=150,
    batch_size=64,
    hidden_layers=3,
    hidden_dim=128,
    verbose=True,
)

(
    t1CV_SMLP_results,
    t1CV_SMLP_nested_results,
    t1CV_SMLP_best_model,
    t1CV_SMLP_best_params,
    _,
) = train_and_evaluate_model(**smlp_1task_cv_kwargs)
model parameter(s) from the grid will overwrite any overlapping parameters provided directly to this function
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Nested cross-validation with grid search:
Outer fold 1:
Using device: mps

Size of the X data for training in the inner fold 1: (454, 1380)
Size of the Y data for training in the inner fold 1: (454, 1)
Epoch 1: Train loss = 0.3332, Val loss = 0.4807
Epoch 2: Train loss = 0.3200, Val loss = 0.4678
Epoch 3: Train loss = 0.3094, Val loss = 0.4559
Using device: mps

Size of the X data for training in the inner fold 2: (455, 1380)
Size of the Y data for training in the inner fold 2: (455, 1)
Epoch 1: Train loss = 0.3982, Val loss = 0.3572
Epoch 2: Train loss = 0.3926, Val loss = 0.3430
Epoch 3: Train loss = 0.3811, Val loss = 0.3282
Using device: mps

Size of the X data for training in the inner fold 1: (454, 1380)
Size of the Y data for training in the inner fold 1: (454, 1)
Epoch 1: Train loss = 0.2928, Val loss = 0.4498
Epoch 2: Train loss = 0.2900, Val loss = 0.4431
Epoch 3: Train loss = 0.2851, Val loss = 0.4366
Using device: mps

Size of the X data for training in the inner fold 2: (455, 1380)
Size of the Y data for training in the inner fold 2: (455, 1)
Epoch 1: Train loss = 0.4360, Val loss = 0.3942
Epoch 2: Train loss = 0.4318, Val loss = 0.3870
Epoch 3: Train loss = 0.4253, Val loss = 0.3796

Size of the X data for training in the outer fold: (908, 1380)
Size of the Y data for training in the outer fold: (908, 1)
Using device: mps
Epoch 1: Train loss = 0.2409, Val loss = 0.2667
Epoch 2: Train loss = 0.2322, Val loss = 0.2549
Epoch 3: Train loss = 0.2207, Val loss = 0.2424

Outer Fold 1 — Task 1 Metrics:
  1 day(s) MAE (log-var)             : 1.2729
  1 day(s) RMSE (log-var)            : 1.5876
  1 day(s) R2 (log-var)              : -1.4775
  1 day(s) MAE (var)                 : 3.8590
  1 day(s) RMSE (var)                : 10.3278
  1 day(s) R2 (var)                  : -0.1593
  1 day(s) QLIKE (var)               : 0.6606
  full horizon MAE (log-var)         : 1.2729
  full horizon RMSE (log-var)        : 1.5876
  full horizon R2 (log-var)          : -1.4775
  full horizon MAE (var)             : 3.8590
  full horizon RMSE (var)            : 10.3278
  full horizon R2 (var)              : -0.1593
  full horizon QLIKE (var)           : 0.6606
Outer fold 2:
Using device: mps

Size of the X data for training in the inner fold 1: (454, 1380)
Size of the Y data for training in the inner fold 1: (454, 1)
Epoch 1: Train loss = 0.1575, Val loss = 0.1674
Epoch 2: Train loss = 0.1438, Val loss = 0.1506
Epoch 3: Train loss = 0.1335, Val loss = 0.1335
Using device: mps

Size of the X data for training in the inner fold 2: (455, 1380)
Size of the Y data for training in the inner fold 2: (455, 1)
Epoch 1: Train loss = 0.2713, Val loss = 0.2705
Epoch 2: Train loss = 0.2607, Val loss = 0.2520
Epoch 3: Train loss = 0.2477, Val loss = 0.2349
Using device: mps

Size of the X data for training in the inner fold 1: (454, 1380)
Size of the Y data for training in the inner fold 1: (454, 1)
Epoch 1: Train loss = 0.2942, Val loss = 0.3100
Epoch 2: Train loss = 0.2901, Val loss = 0.3045
Epoch 3: Train loss = 0.2860, Val loss = 0.2997
Using device: mps

Size of the X data for training in the inner fold 2: (455, 1380)
Size of the Y data for training in the inner fold 2: (455, 1)
Epoch 1: Train loss = 0.3083, Val loss = 0.3238
Epoch 2: Train loss = 0.3024, Val loss = 0.3144
Epoch 3: Train loss = 0.2943, Val loss = 0.3036

Size of the X data for training in the outer fold: (908, 1380)
Size of the Y data for training in the outer fold: (908, 1)
Using device: mps
Epoch 1: Train loss = 0.2239, Val loss = 0.2457
Epoch 2: Train loss = 0.2025, Val loss = 0.2139
Epoch 3: Train loss = 0.1748, Val loss = 0.1794

Outer Fold 2 — Task 1 Metrics:
  1 day(s) MAE (log-var)             : 1.6916
  1 day(s) RMSE (log-var)            : 1.8694
  1 day(s) R2 (log-var)              : -4.3799
  1 day(s) MAE (var)                 : 3.1747
  1 day(s) RMSE (var)                : 7.0885
  1 day(s) R2 (var)                  : -0.2496
  1 day(s) QLIKE (var)               : 0.4148
  full horizon MAE (log-var)         : 1.6916
  full horizon RMSE (log-var)        : 1.8694
  full horizon R2 (log-var)          : -4.3799
  full horizon MAE (var)             : 3.1747
  full horizon RMSE (var)            : 7.0885
  full horizon R2 (var)              : -0.2496
  full horizon QLIKE (var)           : 0.4148

Nested CV Average Metrics with 95% CI:
Nested 1 day(s) MAE (log-var)                : 1.4822
Nested 1 day(s) MAE (log-var) 95% CI         : (-1.1780, 4.1425)
Nested 1 day(s) MAE (var)                    : 3.5169
Nested 1 day(s) MAE (var) 95% CI             : (-0.8311, 7.8648)
Nested 1 day(s) QLIKE (var)                  : 0.5377
Nested 1 day(s) QLIKE (var) 95% CI           : (-1.0238, 2.0993)
Nested 1 day(s) R2 (log-var)                 : -2.9287
Nested 1 day(s) R2 (log-var) 95% CI          : (-21.3679, 15.5104)
Nested 1 day(s) R2 (var)                     : -0.2045
Nested 1 day(s) R2 (var) 95% CI              : (-0.7781, 0.3692)
Nested 1 day(s) RMSE (log-var)               : 1.7285
Nested 1 day(s) RMSE (log-var) 95% CI        : (-0.0614, 3.5185)
Nested 1 day(s) RMSE (var)                   : 8.7081
Nested 1 day(s) RMSE (var) 95% CI            : (-11.8710, 29.2873)
Nested full horizon MAE (log-var)            : 1.4822
Nested full horizon MAE (log-var) 95% CI     : (-1.1780, 4.1425)
Nested full horizon MAE (var)                : 3.5169
Nested full horizon MAE (var) 95% CI         : (-0.8311, 7.8648)
Nested full horizon QLIKE (var)              : 0.5377
Nested full horizon QLIKE (var) 95% CI       : (-1.0238, 2.0993)
Nested full horizon R2 (log-var)             : -2.9287
Nested full horizon R2 (log-var) 95% CI      : (-21.3679, 15.5104)
Nested full horizon R2 (var)                 : -0.2045
Nested full horizon R2 (var) 95% CI          : (-0.7781, 0.3692)
Nested full horizon RMSE (log-var)           : 1.7285
Nested full horizon RMSE (log-var) 95% CI    : (-0.0614, 3.5185)
Nested full horizon RMSE (var)               : 8.7081
Nested full horizon RMSE (var) 95% CI        : (-11.8710, 29.2873)

Best parameters found via nested CV:
  batch_size: 128
  dropout: 0.2
  epochs: 3
  lr: 3e-05
  resume: True
  verbose: True

Refitting the final model with the best parameters found in the nested CV:
Using device: mps

Size of the X data for training: (1453, 1380)
Size of the Y data for training: (1453, 1)

Final refit model parameters (after nested CV):
input_dim: 1380
output_dim: 1
hidden_layers: 3
dropout: 0.20000000
lr: 0.00003000
epochs: 3
batch_size: 128
device: mps
verbose: True
no_tasks: 1
l2_weight: 0.00000000
patience: 10
min_epochs: 50
min_delta: 0.00010000
hidden_dim: 128
Epoch 1: Train loss = 0.0901, Val loss = 0.0448
Epoch 2: Train loss = 0.0747, Val loss = 0.0461
Epoch 3: Train loss = 0.0686, Val loss = 0.0571

Results after refitting the final model:

Task 1 Final Metrics:
  1 day(s) MAE (log-var)                  : 0.6673
  1 day(s) RMSE (log-var)                 : 0.8634
  1 day(s) R2 (log-var)                   : -0.1150
  1 day(s) MAE (var)                      : 2.5451
  1 day(s) RMSE (var)                     : 8.6897
  1 day(s) R2 (var)                       : -0.0109
  1 day(s) QLIKE (var)                    : 0.5363
  full horizon MAE (log-var)              : 0.6673
  full horizon RMSE (log-var)             : 0.8634
  full horizon R2 (log-var)               : -0.1150
  full horizon MAE (var)                  : 2.5451
  full horizon RMSE (var)                 : 8.6897
  full horizon R2 (var)                   : -0.0109
  full horizon QLIKE (var)                : 0.5363

Final nested-CV model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_SMLP_model_1_CV.pkl

OHO with parameter search - Vanilla MLP - 1 task - data 3 - horizon 1¶

In [ ]:
import os

# NOTE: each OHO scenario must save to its own file. The original shared name
# "test_OHO_SMLP_model.pkl" was reused by the horizon-28 and APPL4 cells below,
# so every later run silently overwrote this scenario's saved model.
save_test_OHO_SMLP_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_SMLP_model_eur3_h1.pkl"
)

# Search space for the single-holdout parameter search.
param_grid = {
    "lr": [5e-5, 5e-4],
    "dropout": [0, 0.1],
    "l2_weight": [5e-5, 5e-4],
    "batch_size": [256, 512],
    "hidden_layers": [2, 4],
    "hidden_dim": [64, 256]
}

t1d3_OHO_SMLP_results, t1d3_OHO_SMLP_nested_results, t1d3_OHO_SMLP_best_model, t1d3_OHO_SMLP_best_params, t1d3_OHO_SMLP_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    # one train/test split, with the grid searched on a single train/val split
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_MLP",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,

    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=1,

    # these have to be adjusted for some models
    patience=10,
    epochs=100,
    min_epochs=50,

    # saving model
    save_model_path=save_test_OHO_SMLP_model_file_path,

    # these go into the parameter grid (grid values override them)
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,

    hidden_layers=3,
    hidden_dim=256
)

OHO with parameter search - Vanilla MLP - 1 task - data 3 - horizon 28¶

In [ ]:
import os

# NOTE: save under a horizon-specific name. The original reused
# "test_OHO_SMLP_model.pkl" across all OHO cells, so this run clobbered the
# horizon-1 model saved by the previous cell (and was clobbered in turn).
save_test_OHO_SMLP_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_SMLP_model_eur3_h28.pkl"
)

# Search space for the single-holdout parameter search.
param_grid = {
    "lr": [5e-5, 5e-4],
    "dropout": [0, 0.1],
    "l2_weight": [5e-5, 5e-4],
    "batch_size": [256, 512],
    "hidden_layers": [2, 4],
    "hidden_dim": [64, 256]
}

t1d3_OHO_SMLP_results, t1d3_OHO_SMLP_nested_results, t1d3_OHO_SMLP_best_model, t1d3_OHO_SMLP_best_params, t1d3_OHO_SMLP_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    # one train/test split, with the grid searched on a single train/val split
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_MLP",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,

    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=10,
    epochs=100,
    min_epochs=50,

    # saving model
    save_model_path=save_test_OHO_SMLP_model_file_path,

    # these go into the parameter grid (grid values override them)
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,

    hidden_layers=3,
    hidden_dim=256
)

OHO with APPL4 Vanilla MLP (the cell below uses model_type="Simple_MLP", not an LSTM)¶

In [336]:
import os

# NOTE: save under a scenario-specific name. The original reused
# "test_OHO_SMLP_model.pkl" across all OHO cells, so this run overwrote the
# EUR/USD models saved by the two previous cells.
save_test_OHO_SMLP_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_SMLP_model_appl4_h28.pkl"
)

# Search space for the single-holdout parameter search.
param_grid = {
    "lr": [5e-5, 5e-4],
    "dropout": [0, 0.1],
    "l2_weight": [5e-5, 5e-4],
    "batch_size": [256, 512],
    "hidden_layers": [2, 4],
    "hidden_dim": [64, 256]
}

t1d3_OHO_SMLP_results, t1d3_OHO_SMLP_nested_results, t1d3_OHO_SMLP_best_model, t1d3_OHO_SMLP_best_params, t1d3_OHO_SMLP_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    # one train/test split, with the grid searched on a single train/val split
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each (heading mentions LSTM, but this cell fits a Simple_MLP)
    model_type="Simple_MLP",

    # data for each scenario
    X_price=appl4_X_price,
    X_time=appl4_X_time,
    y=appl4_y,

    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=10,
    epochs=100,
    min_epochs=50,

    # saving model
    save_model_path=save_test_OHO_SMLP_model_file_path,

    # these go into the parameter grid (grid values override them)
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,

    hidden_layers=3,
    hidden_dim=256
)
model parameter(s) from the grid will overwrite any overlapping parameters provided directly to this function
Using device: mps
Batch size for y: 2018
Time steps for y: 28
Features for y: 1

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
Using device: mps
Epoch 1: Train loss = 0.0384, Val loss = 0.0213
Epoch 2: Train loss = 0.0344, Val loss = 0.0186
Epoch 3: Train loss = 0.0312, Val loss = 0.0163
Epoch 4: Train loss = 0.0281, Val loss = 0.0141
Epoch 5: Train loss = 0.0253, Val loss = 0.0124
Epoch 6: Train loss = 0.0226, Val loss = 0.0115
Epoch 7: Train loss = 0.0207, Val loss = 0.0110
Epoch 8: Train loss = 0.0190, Val loss = 0.0107
Epoch 9: Train loss = 0.0175, Val loss = 0.0104
Epoch 10: Train loss = 0.0162, Val loss = 0.0098
Epoch 11: Train loss = 0.0149, Val loss = 0.0091
Epoch 12: Train loss = 0.0138, Val loss = 0.0084
Epoch 13: Train loss = 0.0126, Val loss = 0.0078
Epoch 14: Train loss = 0.0117, Val loss = 0.0074
Epoch 15: Train loss = 0.0107, Val loss = 0.0072
Epoch 16: Train loss = 0.0100, Val loss = 0.0073
Epoch 17: Train loss = 0.0094, Val loss = 0.0073
Epoch 18: Train loss = 0.0089, Val loss = 0.0073
Epoch 19: Train loss = 0.0085, Val loss = 0.0072
Epoch 20: Train loss = 0.0081, Val loss = 0.0071
Epoch 21: Train loss = 0.0079, Val loss = 0.0070
Epoch 22: Train loss = 0.0076, Val loss = 0.0070
Epoch 23: Train loss = 0.0074, Val loss = 0.0070
Epoch 24: Train loss = 0.0072, Val loss = 0.0071
Epoch 25: Train loss = 0.0071, Val loss = 0.0072
Epoch 26: Train loss = 0.0070, Val loss = 0.0072
Epoch 27: Train loss = 0.0069, Val loss = 0.0073
Epoch 28: Train loss = 0.0069, Val loss = 0.0074
Epoch 29: Train loss = 0.0069, Val loss = 0.0075
Epoch 30: Train loss = 0.0068, Val loss = 0.0076
Epoch 31: Train loss = 0.0068, Val loss = 0.0077
Epoch 32: Train loss = 0.0067, Val loss = 0.0077
Epoch 33: Train loss = 0.0067, Val loss = 0.0078
Epoch 34: Train loss = 0.0067, Val loss = 0.0078
Epoch 35: Train loss = 0.0067, Val loss = 0.0079
Epoch 36: Train loss = 0.0067, Val loss = 0.0080
Epoch 37: Train loss = 0.0067, Val loss = 0.0080
Epoch 38: Train loss = 0.0066, Val loss = 0.0081
Epoch 39: Train loss = 0.0066, Val loss = 0.0082
Epoch 40: Train loss = 0.0066, Val loss = 0.0081
Epoch 41: Train loss = 0.0066, Val loss = 0.0081
Epoch 42: Train loss = 0.0066, Val loss = 0.0083
Epoch 43: Train loss = 0.0065, Val loss = 0.0084
Epoch 44: Train loss = 0.0065, Val loss = 0.0084
Epoch 45: Train loss = 0.0065, Val loss = 0.0083
Epoch 46: Train loss = 0.0065, Val loss = 0.0084
Epoch 47: Train loss = 0.0065, Val loss = 0.0085
Epoch 48: Train loss = 0.0065, Val loss = 0.0087
Epoch 49: Train loss = 0.0065, Val loss = 0.0085
Epoch 50: Train loss = 0.0065, Val loss = 0.0086
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0439, Val loss = 0.0217
Epoch 2: Train loss = 0.0249, Val loss = 0.0201
Epoch 3: Train loss = 0.0150, Val loss = 0.0125
Epoch 4: Train loss = 0.0096, Val loss = 0.0110
Epoch 5: Train loss = 0.0077, Val loss = 0.0101
Epoch 6: Train loss = 0.0068, Val loss = 0.0092
Epoch 7: Train loss = 0.0068, Val loss = 0.0093
Epoch 8: Train loss = 0.0067, Val loss = 0.0095
Epoch 9: Train loss = 0.0066, Val loss = 0.0093
Epoch 10: Train loss = 0.0065, Val loss = 0.0100
Epoch 11: Train loss = 0.0065, Val loss = 0.0095
Epoch 12: Train loss = 0.0065, Val loss = 0.0099
Epoch 13: Train loss = 0.0064, Val loss = 0.0094
Epoch 14: Train loss = 0.0064, Val loss = 0.0097
Epoch 15: Train loss = 0.0064, Val loss = 0.0095
Epoch 16: Train loss = 0.0063, Val loss = 0.0095
Epoch 17: Train loss = 0.0063, Val loss = 0.0097
Epoch 18: Train loss = 0.0063, Val loss = 0.0093
Epoch 19: Train loss = 0.0063, Val loss = 0.0101
Epoch 20: Train loss = 0.0063, Val loss = 0.0094
Epoch 21: Train loss = 0.0063, Val loss = 0.0100
Epoch 22: Train loss = 0.0062, Val loss = 0.0099
Epoch 23: Train loss = 0.0062, Val loss = 0.0089
Epoch 24: Train loss = 0.0062, Val loss = 0.0103
Epoch 25: Train loss = 0.0062, Val loss = 0.0092
Epoch 26: Train loss = 0.0062, Val loss = 0.0091
Epoch 27: Train loss = 0.0062, Val loss = 0.0103
Epoch 28: Train loss = 0.0062, Val loss = 0.0088
Epoch 29: Train loss = 0.0061, Val loss = 0.0097
Epoch 30: Train loss = 0.0061, Val loss = 0.0103
Epoch 31: Train loss = 0.0061, Val loss = 0.0089
Epoch 32: Train loss = 0.0061, Val loss = 0.0095
Epoch 33: Train loss = 0.0060, Val loss = 0.0100
Epoch 34: Train loss = 0.0060, Val loss = 0.0096
Epoch 35: Train loss = 0.0060, Val loss = 0.0089
Epoch 36: Train loss = 0.0059, Val loss = 0.0090
Epoch 37: Train loss = 0.0060, Val loss = 0.0097
Epoch 38: Train loss = 0.0059, Val loss = 0.0100
Epoch 39: Train loss = 0.0059, Val loss = 0.0092
Epoch 40: Train loss = 0.0059, Val loss = 0.0089
Epoch 41: Train loss = 0.0059, Val loss = 0.0095
Epoch 42: Train loss = 0.0059, Val loss = 0.0103
Epoch 43: Train loss = 0.0058, Val loss = 0.0092
Epoch 44: Train loss = 0.0058, Val loss = 0.0079
Epoch 45: Train loss = 0.0058, Val loss = 0.0090
Epoch 46: Train loss = 0.0058, Val loss = 0.0095
Epoch 47: Train loss = 0.0057, Val loss = 0.0086
Epoch 48: Train loss = 0.0058, Val loss = 0.0092
Epoch 49: Train loss = 0.0057, Val loss = 0.0089
Epoch 50: Train loss = 0.0057, Val loss = 0.0086
Epoch 51: Train loss = 0.0057, Val loss = 0.0083
Epoch 52: Train loss = 0.0057, Val loss = 0.0078
Epoch 53: Train loss = 0.0056, Val loss = 0.0076
Epoch 54: Train loss = 0.0056, Val loss = 0.0071
Epoch 55: Train loss = 0.0056, Val loss = 0.0068
Epoch 56: Train loss = 0.0056, Val loss = 0.0069
Epoch 57: Train loss = 0.0056, Val loss = 0.0063
Epoch 58: Train loss = 0.0055, Val loss = 0.0063
Epoch 59: Train loss = 0.0055, Val loss = 0.0069
Epoch 60: Train loss = 0.0054, Val loss = 0.0069
Epoch 61: Train loss = 0.0055, Val loss = 0.0070
Epoch 62: Train loss = 0.0054, Val loss = 0.0064
Epoch 63: Train loss = 0.0056, Val loss = 0.0083
Epoch 64: Train loss = 0.0055, Val loss = 0.0079
Epoch 65: Train loss = 0.0054, Val loss = 0.0075
Epoch 66: Train loss = 0.0054, Val loss = 0.0061
Epoch 67: Train loss = 0.0053, Val loss = 0.0059
Epoch 68: Train loss = 0.0053, Val loss = 0.0053
Epoch 69: Train loss = 0.0053, Val loss = 0.0055
Epoch 70: Train loss = 0.0053, Val loss = 0.0058
Epoch 71: Train loss = 0.0052, Val loss = 0.0068
Epoch 72: Train loss = 0.0053, Val loss = 0.0057
Epoch 73: Train loss = 0.0052, Val loss = 0.0048
Epoch 74: Train loss = 0.0053, Val loss = 0.0052
Epoch 75: Train loss = 0.0052, Val loss = 0.0066
Epoch 76: Train loss = 0.0052, Val loss = 0.0062
Epoch 77: Train loss = 0.0052, Val loss = 0.0063
Epoch 78: Train loss = 0.0051, Val loss = 0.0054
Epoch 79: Train loss = 0.0051, Val loss = 0.0060
Epoch 80: Train loss = 0.0051, Val loss = 0.0058
Epoch 81: Train loss = 0.0052, Val loss = 0.0055
Epoch 82: Train loss = 0.0051, Val loss = 0.0068
Epoch 83: Train loss = 0.0051, Val loss = 0.0060
Early stopping triggered at epoch 83.
Using device: mps
Epoch 1: Train loss = 0.0505, Val loss = 0.0313
Epoch 2: Train loss = 0.0479, Val loss = 0.0297
Epoch 3: Train loss = 0.0460, Val loss = 0.0282
Epoch 4: Train loss = 0.0440, Val loss = 0.0268
Epoch 5: Train loss = 0.0422, Val loss = 0.0254
Epoch 6: Train loss = 0.0403, Val loss = 0.0240
Epoch 7: Train loss = 0.0380, Val loss = 0.0225
Epoch 8: Train loss = 0.0356, Val loss = 0.0208
Epoch 9: Train loss = 0.0331, Val loss = 0.0188
Epoch 10: Train loss = 0.0302, Val loss = 0.0169
Epoch 11: Train loss = 0.0274, Val loss = 0.0151
Epoch 12: Train loss = 0.0248, Val loss = 0.0137
Epoch 13: Train loss = 0.0224, Val loss = 0.0126
Epoch 14: Train loss = 0.0202, Val loss = 0.0117
Epoch 15: Train loss = 0.0183, Val loss = 0.0108
Epoch 16: Train loss = 0.0163, Val loss = 0.0100
Epoch 17: Train loss = 0.0148, Val loss = 0.0093
Epoch 18: Train loss = 0.0132, Val loss = 0.0088
Epoch 19: Train loss = 0.0120, Val loss = 0.0087
Epoch 20: Train loss = 0.0111, Val loss = 0.0087
Epoch 21: Train loss = 0.0103, Val loss = 0.0088
Epoch 22: Train loss = 0.0096, Val loss = 0.0088
Epoch 23: Train loss = 0.0091, Val loss = 0.0088
Epoch 24: Train loss = 0.0088, Val loss = 0.0087
Epoch 25: Train loss = 0.0084, Val loss = 0.0087
Epoch 26: Train loss = 0.0081, Val loss = 0.0088
Epoch 27: Train loss = 0.0079, Val loss = 0.0088
Epoch 28: Train loss = 0.0077, Val loss = 0.0088
Epoch 29: Train loss = 0.0076, Val loss = 0.0087
Epoch 30: Train loss = 0.0074, Val loss = 0.0088
Epoch 31: Train loss = 0.0072, Val loss = 0.0088
Epoch 32: Train loss = 0.0072, Val loss = 0.0087
Epoch 33: Train loss = 0.0070, Val loss = 0.0088
Epoch 34: Train loss = 0.0069, Val loss = 0.0089
Epoch 35: Train loss = 0.0069, Val loss = 0.0090
Epoch 36: Train loss = 0.0068, Val loss = 0.0089
Epoch 37: Train loss = 0.0068, Val loss = 0.0090
Epoch 38: Train loss = 0.0068, Val loss = 0.0091
Epoch 39: Train loss = 0.0067, Val loss = 0.0091
Epoch 40: Train loss = 0.0067, Val loss = 0.0092
Epoch 41: Train loss = 0.0066, Val loss = 0.0091
Epoch 42: Train loss = 0.0066, Val loss = 0.0091
Epoch 43: Train loss = 0.0066, Val loss = 0.0092
Epoch 44: Train loss = 0.0066, Val loss = 0.0093
Epoch 45: Train loss = 0.0066, Val loss = 0.0093
Epoch 46: Train loss = 0.0065, Val loss = 0.0093
Epoch 47: Train loss = 0.0066, Val loss = 0.0093
Epoch 48: Train loss = 0.0066, Val loss = 0.0094
Epoch 49: Train loss = 0.0065, Val loss = 0.0095
Epoch 50: Train loss = 0.0065, Val loss = 0.0094
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0470, Val loss = 0.0237
Epoch 2: Train loss = 0.0300, Val loss = 0.0254
Epoch 3: Train loss = 0.0200, Val loss = 0.0140
Epoch 4: Train loss = 0.0127, Val loss = 0.0098
Epoch 5: Train loss = 0.0089, Val loss = 0.0110
Epoch 6: Train loss = 0.0073, Val loss = 0.0090
Epoch 7: Train loss = 0.0069, Val loss = 0.0089
Epoch 8: Train loss = 0.0068, Val loss = 0.0099
Epoch 9: Train loss = 0.0066, Val loss = 0.0084
Epoch 10: Train loss = 0.0065, Val loss = 0.0101
Epoch 11: Train loss = 0.0065, Val loss = 0.0090
Epoch 12: Train loss = 0.0065, Val loss = 0.0098
Epoch 13: Train loss = 0.0064, Val loss = 0.0098
Epoch 14: Train loss = 0.0064, Val loss = 0.0100
Epoch 15: Train loss = 0.0063, Val loss = 0.0098
Epoch 16: Train loss = 0.0063, Val loss = 0.0096
Epoch 17: Train loss = 0.0063, Val loss = 0.0099
Epoch 18: Train loss = 0.0063, Val loss = 0.0097
Epoch 19: Train loss = 0.0063, Val loss = 0.0099
Epoch 20: Train loss = 0.0062, Val loss = 0.0097
Epoch 21: Train loss = 0.0063, Val loss = 0.0102
Epoch 22: Train loss = 0.0062, Val loss = 0.0094
Epoch 23: Train loss = 0.0062, Val loss = 0.0101
Epoch 24: Train loss = 0.0062, Val loss = 0.0095
Epoch 25: Train loss = 0.0062, Val loss = 0.0106
Epoch 26: Train loss = 0.0062, Val loss = 0.0086
Epoch 27: Train loss = 0.0062, Val loss = 0.0109
Epoch 28: Train loss = 0.0061, Val loss = 0.0093
Epoch 29: Train loss = 0.0061, Val loss = 0.0098
Epoch 30: Train loss = 0.0060, Val loss = 0.0097
Epoch 31: Train loss = 0.0061, Val loss = 0.0089
Epoch 32: Train loss = 0.0061, Val loss = 0.0090
Epoch 33: Train loss = 0.0060, Val loss = 0.0099
Epoch 34: Train loss = 0.0060, Val loss = 0.0078
Epoch 35: Train loss = 0.0060, Val loss = 0.0101
Epoch 36: Train loss = 0.0060, Val loss = 0.0105
Epoch 37: Train loss = 0.0060, Val loss = 0.0087
Epoch 38: Train loss = 0.0059, Val loss = 0.0075
Epoch 39: Train loss = 0.0059, Val loss = 0.0087
Epoch 40: Train loss = 0.0059, Val loss = 0.0099
Epoch 41: Train loss = 0.0059, Val loss = 0.0098
Epoch 42: Train loss = 0.0058, Val loss = 0.0091
Epoch 43: Train loss = 0.0058, Val loss = 0.0082
Epoch 44: Train loss = 0.0058, Val loss = 0.0076
Epoch 45: Train loss = 0.0058, Val loss = 0.0080
Epoch 46: Train loss = 0.0058, Val loss = 0.0096
Epoch 47: Train loss = 0.0057, Val loss = 0.0086
Epoch 48: Train loss = 0.0057, Val loss = 0.0093
Epoch 49: Train loss = 0.0057, Val loss = 0.0084
Epoch 50: Train loss = 0.0057, Val loss = 0.0084
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0603, Val loss = 0.0412
Epoch 2: Train loss = 0.0592, Val loss = 0.0403
Epoch 3: Train loss = 0.0583, Val loss = 0.0395
Epoch 4: Train loss = 0.0573, Val loss = 0.0387
Epoch 5: Train loss = 0.0562, Val loss = 0.0379
Epoch 6: Train loss = 0.0556, Val loss = 0.0369
Epoch 7: Train loss = 0.0541, Val loss = 0.0357
Epoch 8: Train loss = 0.0527, Val loss = 0.0344
Epoch 9: Train loss = 0.0515, Val loss = 0.0330
Epoch 10: Train loss = 0.0497, Val loss = 0.0315
Epoch 11: Train loss = 0.0478, Val loss = 0.0298
Epoch 12: Train loss = 0.0459, Val loss = 0.0280
Epoch 13: Train loss = 0.0435, Val loss = 0.0261
Epoch 14: Train loss = 0.0411, Val loss = 0.0241
Epoch 15: Train loss = 0.0384, Val loss = 0.0221
Epoch 16: Train loss = 0.0359, Val loss = 0.0203
Epoch 17: Train loss = 0.0329, Val loss = 0.0189
Epoch 18: Train loss = 0.0303, Val loss = 0.0179
Epoch 19: Train loss = 0.0279, Val loss = 0.0171
Epoch 20: Train loss = 0.0257, Val loss = 0.0164
Epoch 21: Train loss = 0.0237, Val loss = 0.0158
Epoch 22: Train loss = 0.0218, Val loss = 0.0151
Epoch 23: Train loss = 0.0201, Val loss = 0.0144
Epoch 24: Train loss = 0.0185, Val loss = 0.0137
Epoch 25: Train loss = 0.0171, Val loss = 0.0131
Epoch 26: Train loss = 0.0158, Val loss = 0.0126
Epoch 27: Train loss = 0.0147, Val loss = 0.0121
Epoch 28: Train loss = 0.0138, Val loss = 0.0117
Epoch 29: Train loss = 0.0129, Val loss = 0.0112
Epoch 30: Train loss = 0.0121, Val loss = 0.0109
Epoch 31: Train loss = 0.0114, Val loss = 0.0107
Epoch 32: Train loss = 0.0108, Val loss = 0.0103
Epoch 33: Train loss = 0.0103, Val loss = 0.0100
Epoch 34: Train loss = 0.0099, Val loss = 0.0097
Epoch 35: Train loss = 0.0094, Val loss = 0.0096
Epoch 36: Train loss = 0.0090, Val loss = 0.0096
Epoch 37: Train loss = 0.0086, Val loss = 0.0095
Epoch 38: Train loss = 0.0083, Val loss = 0.0093
Epoch 39: Train loss = 0.0081, Val loss = 0.0092
Epoch 40: Train loss = 0.0078, Val loss = 0.0091
Epoch 41: Train loss = 0.0076, Val loss = 0.0090
Epoch 42: Train loss = 0.0074, Val loss = 0.0090
Epoch 43: Train loss = 0.0072, Val loss = 0.0090
Epoch 44: Train loss = 0.0072, Val loss = 0.0090
Epoch 45: Train loss = 0.0070, Val loss = 0.0090
Epoch 46: Train loss = 0.0069, Val loss = 0.0090
Epoch 47: Train loss = 0.0069, Val loss = 0.0089
Epoch 48: Train loss = 0.0068, Val loss = 0.0090
Epoch 49: Train loss = 0.0068, Val loss = 0.0090
Epoch 50: Train loss = 0.0067, Val loss = 0.0090
Epoch 51: Train loss = 0.0067, Val loss = 0.0090
Epoch 52: Train loss = 0.0067, Val loss = 0.0090
Epoch 53: Train loss = 0.0066, Val loss = 0.0089
Epoch 54: Train loss = 0.0067, Val loss = 0.0089
Epoch 55: Train loss = 0.0067, Val loss = 0.0091
Epoch 56: Train loss = 0.0066, Val loss = 0.0090
Early stopping triggered at epoch 56.
Using device: mps
Epoch 1: Train loss = 0.0473, Val loss = 0.0259
Epoch 2: Train loss = 0.0379, Val loss = 0.0178
Epoch 3: Train loss = 0.0260, Val loss = 0.0127
Epoch 4: Train loss = 0.0162, Val loss = 0.0174
Epoch 5: Train loss = 0.0111, Val loss = 0.0099
Epoch 6: Train loss = 0.0083, Val loss = 0.0074
Epoch 7: Train loss = 0.0074, Val loss = 0.0099
Epoch 8: Train loss = 0.0070, Val loss = 0.0078
Epoch 9: Train loss = 0.0069, Val loss = 0.0081
Epoch 10: Train loss = 0.0067, Val loss = 0.0092
Epoch 11: Train loss = 0.0067, Val loss = 0.0078
Epoch 12: Train loss = 0.0066, Val loss = 0.0091
Epoch 13: Train loss = 0.0066, Val loss = 0.0088
Epoch 14: Train loss = 0.0066, Val loss = 0.0091
Epoch 15: Train loss = 0.0065, Val loss = 0.0095
Epoch 16: Train loss = 0.0065, Val loss = 0.0092
Epoch 17: Train loss = 0.0065, Val loss = 0.0098
Epoch 18: Train loss = 0.0065, Val loss = 0.0095
Epoch 19: Train loss = 0.0065, Val loss = 0.0096
Epoch 20: Train loss = 0.0065, Val loss = 0.0099
Epoch 21: Train loss = 0.0064, Val loss = 0.0096
Epoch 22: Train loss = 0.0065, Val loss = 0.0106
Epoch 23: Train loss = 0.0064, Val loss = 0.0092
Epoch 24: Train loss = 0.0064, Val loss = 0.0110
Epoch 25: Train loss = 0.0063, Val loss = 0.0096
Epoch 26: Train loss = 0.0063, Val loss = 0.0107
Epoch 27: Train loss = 0.0063, Val loss = 0.0109
Epoch 28: Train loss = 0.0063, Val loss = 0.0096
Epoch 29: Train loss = 0.0063, Val loss = 0.0106
Epoch 30: Train loss = 0.0062, Val loss = 0.0109
Epoch 31: Train loss = 0.0063, Val loss = 0.0102
Epoch 32: Train loss = 0.0062, Val loss = 0.0096
Epoch 33: Train loss = 0.0062, Val loss = 0.0109
Epoch 34: Train loss = 0.0062, Val loss = 0.0097
Epoch 35: Train loss = 0.0061, Val loss = 0.0098
Epoch 36: Train loss = 0.0061, Val loss = 0.0114
Epoch 37: Train loss = 0.0061, Val loss = 0.0107
Epoch 38: Train loss = 0.0060, Val loss = 0.0097
Epoch 39: Train loss = 0.0060, Val loss = 0.0097
Epoch 40: Train loss = 0.0060, Val loss = 0.0094
Epoch 41: Train loss = 0.0059, Val loss = 0.0093
Epoch 42: Train loss = 0.0059, Val loss = 0.0091
Epoch 43: Train loss = 0.0059, Val loss = 0.0079
Epoch 44: Train loss = 0.0060, Val loss = 0.0093
Epoch 45: Train loss = 0.0058, Val loss = 0.0088
Epoch 46: Train loss = 0.0058, Val loss = 0.0101
Epoch 47: Train loss = 0.0058, Val loss = 0.0091
Epoch 48: Train loss = 0.0058, Val loss = 0.0077
Epoch 49: Train loss = 0.0058, Val loss = 0.0072
Epoch 50: Train loss = 0.0058, Val loss = 0.0063
Epoch 51: Train loss = 0.0057, Val loss = 0.0070
Epoch 52: Train loss = 0.0058, Val loss = 0.0077
Epoch 53: Train loss = 0.0057, Val loss = 0.0079
Epoch 54: Train loss = 0.0057, Val loss = 0.0081
Epoch 55: Train loss = 0.0057, Val loss = 0.0082
Epoch 56: Train loss = 0.0056, Val loss = 0.0082
Epoch 57: Train loss = 0.0056, Val loss = 0.0081
Epoch 58: Train loss = 0.0055, Val loss = 0.0076
Epoch 59: Train loss = 0.0055, Val loss = 0.0069
Epoch 60: Train loss = 0.0055, Val loss = 0.0065
Early stopping triggered at epoch 60.
Using device: mps
Epoch 1: Train loss = 0.0558, Val loss = 0.0369
Epoch 2: Train loss = 0.0549, Val loss = 0.0361
Epoch 3: Train loss = 0.0536, Val loss = 0.0351
Epoch 4: Train loss = 0.0526, Val loss = 0.0341
Epoch 5: Train loss = 0.0516, Val loss = 0.0332
Epoch 6: Train loss = 0.0508, Val loss = 0.0324
Epoch 7: Train loss = 0.0495, Val loss = 0.0315
Epoch 8: Train loss = 0.0483, Val loss = 0.0305
Epoch 9: Train loss = 0.0474, Val loss = 0.0293
Epoch 10: Train loss = 0.0458, Val loss = 0.0281
Epoch 11: Train loss = 0.0444, Val loss = 0.0268
Epoch 12: Train loss = 0.0426, Val loss = 0.0253
Epoch 13: Train loss = 0.0409, Val loss = 0.0239
Epoch 14: Train loss = 0.0391, Val loss = 0.0223
Epoch 15: Train loss = 0.0367, Val loss = 0.0206
Epoch 16: Train loss = 0.0344, Val loss = 0.0188
Epoch 17: Train loss = 0.0320, Val loss = 0.0171
Epoch 18: Train loss = 0.0293, Val loss = 0.0155
Epoch 19: Train loss = 0.0266, Val loss = 0.0141
Epoch 20: Train loss = 0.0238, Val loss = 0.0131
Epoch 21: Train loss = 0.0214, Val loss = 0.0124
Epoch 22: Train loss = 0.0191, Val loss = 0.0120
Epoch 23: Train loss = 0.0171, Val loss = 0.0117
Epoch 24: Train loss = 0.0154, Val loss = 0.0114
Epoch 25: Train loss = 0.0139, Val loss = 0.0109
Epoch 26: Train loss = 0.0126, Val loss = 0.0103
Epoch 27: Train loss = 0.0115, Val loss = 0.0097
Epoch 28: Train loss = 0.0105, Val loss = 0.0093
Epoch 29: Train loss = 0.0097, Val loss = 0.0091
Epoch 30: Train loss = 0.0091, Val loss = 0.0091
Epoch 31: Train loss = 0.0086, Val loss = 0.0090
Epoch 32: Train loss = 0.0082, Val loss = 0.0090
Epoch 33: Train loss = 0.0079, Val loss = 0.0090
Epoch 34: Train loss = 0.0076, Val loss = 0.0089
Epoch 35: Train loss = 0.0074, Val loss = 0.0087
Epoch 36: Train loss = 0.0073, Val loss = 0.0087
Epoch 37: Train loss = 0.0071, Val loss = 0.0089
Epoch 38: Train loss = 0.0070, Val loss = 0.0089
Epoch 39: Train loss = 0.0070, Val loss = 0.0088
Epoch 40: Train loss = 0.0068, Val loss = 0.0088
Epoch 41: Train loss = 0.0069, Val loss = 0.0087
Epoch 42: Train loss = 0.0068, Val loss = 0.0088
Epoch 43: Train loss = 0.0068, Val loss = 0.0088
Epoch 44: Train loss = 0.0067, Val loss = 0.0089
Epoch 45: Train loss = 0.0067, Val loss = 0.0088
Epoch 46: Train loss = 0.0067, Val loss = 0.0088
Epoch 47: Train loss = 0.0067, Val loss = 0.0089
Epoch 48: Train loss = 0.0066, Val loss = 0.0088
Epoch 49: Train loss = 0.0066, Val loss = 0.0088
Epoch 50: Train loss = 0.0066, Val loss = 0.0088
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0544, Val loss = 0.0298
Epoch 2: Train loss = 0.0399, Val loss = 0.0153
Epoch 3: Train loss = 0.0213, Val loss = 0.0217
Epoch 4: Train loss = 0.0134, Val loss = 0.0122
Epoch 5: Train loss = 0.0088, Val loss = 0.0066
Epoch 6: Train loss = 0.0077, Val loss = 0.0101
Epoch 7: Train loss = 0.0072, Val loss = 0.0095
Epoch 8: Train loss = 0.0069, Val loss = 0.0078
Epoch 9: Train loss = 0.0068, Val loss = 0.0094
Epoch 10: Train loss = 0.0067, Val loss = 0.0090
Epoch 11: Train loss = 0.0066, Val loss = 0.0086
Epoch 12: Train loss = 0.0065, Val loss = 0.0095
Epoch 13: Train loss = 0.0065, Val loss = 0.0087
Epoch 14: Train loss = 0.0065, Val loss = 0.0094
Epoch 15: Train loss = 0.0065, Val loss = 0.0096
Epoch 16: Train loss = 0.0065, Val loss = 0.0096
Epoch 17: Train loss = 0.0065, Val loss = 0.0099
Epoch 18: Train loss = 0.0064, Val loss = 0.0095
Epoch 19: Train loss = 0.0064, Val loss = 0.0101
Epoch 20: Train loss = 0.0064, Val loss = 0.0099
Epoch 21: Train loss = 0.0064, Val loss = 0.0098
Epoch 22: Train loss = 0.0064, Val loss = 0.0103
Epoch 23: Train loss = 0.0064, Val loss = 0.0100
Epoch 24: Train loss = 0.0064, Val loss = 0.0104
Epoch 25: Train loss = 0.0063, Val loss = 0.0103
Epoch 26: Train loss = 0.0063, Val loss = 0.0109
Epoch 27: Train loss = 0.0063, Val loss = 0.0098
Epoch 28: Train loss = 0.0063, Val loss = 0.0108
Epoch 29: Train loss = 0.0063, Val loss = 0.0103
Epoch 30: Train loss = 0.0062, Val loss = 0.0108
Epoch 31: Train loss = 0.0062, Val loss = 0.0101
Epoch 32: Train loss = 0.0063, Val loss = 0.0113
Epoch 33: Train loss = 0.0062, Val loss = 0.0101
Epoch 34: Train loss = 0.0062, Val loss = 0.0109
Epoch 35: Train loss = 0.0062, Val loss = 0.0102
Epoch 36: Train loss = 0.0062, Val loss = 0.0109
Epoch 37: Train loss = 0.0061, Val loss = 0.0108
Epoch 38: Train loss = 0.0061, Val loss = 0.0109
Epoch 39: Train loss = 0.0061, Val loss = 0.0112
Epoch 40: Train loss = 0.0061, Val loss = 0.0083
Epoch 41: Train loss = 0.0061, Val loss = 0.0107
Epoch 42: Train loss = 0.0061, Val loss = 0.0112
Epoch 43: Train loss = 0.0061, Val loss = 0.0096
Epoch 44: Train loss = 0.0061, Val loss = 0.0093
Epoch 45: Train loss = 0.0060, Val loss = 0.0093
Epoch 46: Train loss = 0.0060, Val loss = 0.0093
Epoch 47: Train loss = 0.0060, Val loss = 0.0100
Epoch 48: Train loss = 0.0059, Val loss = 0.0100
Epoch 49: Train loss = 0.0059, Val loss = 0.0093
Epoch 50: Train loss = 0.0059, Val loss = 0.0104
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0525, Val loss = 0.0299
Epoch 2: Train loss = 0.0451, Val loss = 0.0240
Epoch 3: Train loss = 0.0387, Val loss = 0.0189
Epoch 4: Train loss = 0.0322, Val loss = 0.0141
Epoch 5: Train loss = 0.0253, Val loss = 0.0103
Epoch 6: Train loss = 0.0186, Val loss = 0.0089
Epoch 7: Train loss = 0.0130, Val loss = 0.0100
Epoch 8: Train loss = 0.0095, Val loss = 0.0114
Epoch 9: Train loss = 0.0080, Val loss = 0.0115
Epoch 10: Train loss = 0.0074, Val loss = 0.0106
Epoch 11: Train loss = 0.0072, Val loss = 0.0099
Epoch 12: Train loss = 0.0071, Val loss = 0.0092
Epoch 13: Train loss = 0.0069, Val loss = 0.0091
Epoch 14: Train loss = 0.0068, Val loss = 0.0092
Epoch 15: Train loss = 0.0067, Val loss = 0.0093
Epoch 16: Train loss = 0.0066, Val loss = 0.0093
Epoch 17: Train loss = 0.0065, Val loss = 0.0092
Epoch 18: Train loss = 0.0065, Val loss = 0.0092
Epoch 19: Train loss = 0.0065, Val loss = 0.0092
Epoch 20: Train loss = 0.0064, Val loss = 0.0098
Epoch 21: Train loss = 0.0065, Val loss = 0.0096
Epoch 22: Train loss = 0.0064, Val loss = 0.0095
Epoch 23: Train loss = 0.0064, Val loss = 0.0095
Epoch 24: Train loss = 0.0064, Val loss = 0.0097
Epoch 25: Train loss = 0.0064, Val loss = 0.0097
Epoch 26: Train loss = 0.0064, Val loss = 0.0099
Epoch 27: Train loss = 0.0063, Val loss = 0.0098
Epoch 28: Train loss = 0.0063, Val loss = 0.0096
Epoch 29: Train loss = 0.0064, Val loss = 0.0098
Epoch 30: Train loss = 0.0063, Val loss = 0.0101
Epoch 31: Train loss = 0.0063, Val loss = 0.0101
Epoch 32: Train loss = 0.0063, Val loss = 0.0099
Epoch 33: Train loss = 0.0063, Val loss = 0.0099
Epoch 34: Train loss = 0.0063, Val loss = 0.0103
Epoch 35: Train loss = 0.0062, Val loss = 0.0101
Epoch 36: Train loss = 0.0063, Val loss = 0.0101
Epoch 37: Train loss = 0.0062, Val loss = 0.0104
Epoch 38: Train loss = 0.0063, Val loss = 0.0100
Epoch 39: Train loss = 0.0062, Val loss = 0.0105
Epoch 40: Train loss = 0.0062, Val loss = 0.0104
Epoch 41: Train loss = 0.0062, Val loss = 0.0101
Epoch 42: Train loss = 0.0062, Val loss = 0.0104
Epoch 43: Train loss = 0.0062, Val loss = 0.0102
Epoch 44: Train loss = 0.0062, Val loss = 0.0104
Epoch 45: Train loss = 0.0061, Val loss = 0.0105
Epoch 46: Train loss = 0.0062, Val loss = 0.0103
Epoch 47: Train loss = 0.0061, Val loss = 0.0104
Epoch 48: Train loss = 0.0061, Val loss = 0.0104
Epoch 49: Train loss = 0.0061, Val loss = 0.0104
Epoch 50: Train loss = 0.0061, Val loss = 0.0107
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0270, Val loss = 0.0189
Epoch 2: Train loss = 0.0085, Val loss = 0.0096
Epoch 3: Train loss = 0.0073, Val loss = 0.0077
Epoch 4: Train loss = 0.0068, Val loss = 0.0116
Epoch 5: Train loss = 0.0067, Val loss = 0.0090
Epoch 6: Train loss = 0.0065, Val loss = 0.0086
Epoch 7: Train loss = 0.0064, Val loss = 0.0125
Epoch 8: Train loss = 0.0064, Val loss = 0.0095
Epoch 9: Train loss = 0.0063, Val loss = 0.0109
Epoch 10: Train loss = 0.0062, Val loss = 0.0100
Epoch 11: Train loss = 0.0062, Val loss = 0.0083
Epoch 12: Train loss = 0.0062, Val loss = 0.0101
Epoch 13: Train loss = 0.0062, Val loss = 0.0119
Epoch 14: Train loss = 0.0061, Val loss = 0.0095
Epoch 15: Train loss = 0.0061, Val loss = 0.0091
Epoch 16: Train loss = 0.0060, Val loss = 0.0092
Epoch 17: Train loss = 0.0060, Val loss = 0.0109
Epoch 18: Train loss = 0.0060, Val loss = 0.0110
Epoch 19: Train loss = 0.0059, Val loss = 0.0106
Epoch 20: Train loss = 0.0059, Val loss = 0.0106
Epoch 21: Train loss = 0.0058, Val loss = 0.0097
Epoch 22: Train loss = 0.0058, Val loss = 0.0109
Epoch 23: Train loss = 0.0058, Val loss = 0.0099
Epoch 24: Train loss = 0.0057, Val loss = 0.0087
Epoch 25: Train loss = 0.0057, Val loss = 0.0075
Epoch 26: Train loss = 0.0057, Val loss = 0.0079
Epoch 27: Train loss = 0.0057, Val loss = 0.0090
Epoch 28: Train loss = 0.0057, Val loss = 0.0098
Epoch 29: Train loss = 0.0056, Val loss = 0.0073
Epoch 30: Train loss = 0.0055, Val loss = 0.0076
Epoch 31: Train loss = 0.0054, Val loss = 0.0084
Epoch 32: Train loss = 0.0054, Val loss = 0.0090
Epoch 33: Train loss = 0.0055, Val loss = 0.0056
Epoch 34: Train loss = 0.0054, Val loss = 0.0063
Epoch 35: Train loss = 0.0053, Val loss = 0.0084
Epoch 36: Train loss = 0.0053, Val loss = 0.0051
Epoch 37: Train loss = 0.0053, Val loss = 0.0085
Epoch 38: Train loss = 0.0053, Val loss = 0.0051
Epoch 39: Train loss = 0.0052, Val loss = 0.0075
Epoch 40: Train loss = 0.0051, Val loss = 0.0061
Epoch 41: Train loss = 0.0051, Val loss = 0.0071
Epoch 42: Train loss = 0.0052, Val loss = 0.0076
Epoch 43: Train loss = 0.0052, Val loss = 0.0041
Epoch 44: Train loss = 0.0053, Val loss = 0.0071
Epoch 45: Train loss = 0.0050, Val loss = 0.0069
Epoch 46: Train loss = 0.0050, Val loss = 0.0043
Epoch 47: Train loss = 0.0050, Val loss = 0.0073
Epoch 48: Train loss = 0.0050, Val loss = 0.0049
Epoch 49: Train loss = 0.0049, Val loss = 0.0054
Epoch 50: Train loss = 0.0049, Val loss = 0.0079
Epoch 51: Train loss = 0.0049, Val loss = 0.0048
Epoch 52: Train loss = 0.0048, Val loss = 0.0063
Epoch 53: Train loss = 0.0047, Val loss = 0.0055
Early stopping triggered at epoch 53.
Using device: mps
Epoch 1: Train loss = 0.0523, Val loss = 0.0287
Epoch 2: Train loss = 0.0444, Val loss = 0.0215
Epoch 3: Train loss = 0.0364, Val loss = 0.0147
Epoch 4: Train loss = 0.0272, Val loss = 0.0096
Epoch 5: Train loss = 0.0188, Val loss = 0.0093
Epoch 6: Train loss = 0.0131, Val loss = 0.0140
Epoch 7: Train loss = 0.0105, Val loss = 0.0162
Epoch 8: Train loss = 0.0088, Val loss = 0.0132
Epoch 9: Train loss = 0.0076, Val loss = 0.0104
Epoch 10: Train loss = 0.0073, Val loss = 0.0094
Epoch 11: Train loss = 0.0072, Val loss = 0.0099
Epoch 12: Train loss = 0.0070, Val loss = 0.0107
Epoch 13: Train loss = 0.0068, Val loss = 0.0107
Epoch 14: Train loss = 0.0068, Val loss = 0.0102
Epoch 15: Train loss = 0.0067, Val loss = 0.0099
Epoch 16: Train loss = 0.0066, Val loss = 0.0099
Epoch 17: Train loss = 0.0066, Val loss = 0.0100
Epoch 18: Train loss = 0.0066, Val loss = 0.0102
Epoch 19: Train loss = 0.0065, Val loss = 0.0100
Epoch 20: Train loss = 0.0065, Val loss = 0.0099
Epoch 21: Train loss = 0.0064, Val loss = 0.0101
Epoch 22: Train loss = 0.0065, Val loss = 0.0100
Epoch 23: Train loss = 0.0064, Val loss = 0.0102
Epoch 24: Train loss = 0.0064, Val loss = 0.0102
Epoch 25: Train loss = 0.0064, Val loss = 0.0101
Epoch 26: Train loss = 0.0064, Val loss = 0.0100
Epoch 27: Train loss = 0.0064, Val loss = 0.0101
Epoch 28: Train loss = 0.0064, Val loss = 0.0101
Epoch 29: Train loss = 0.0063, Val loss = 0.0103
Epoch 30: Train loss = 0.0063, Val loss = 0.0102
Epoch 31: Train loss = 0.0063, Val loss = 0.0100
Epoch 32: Train loss = 0.0063, Val loss = 0.0103
Epoch 33: Train loss = 0.0063, Val loss = 0.0101
Epoch 34: Train loss = 0.0063, Val loss = 0.0103
Epoch 35: Train loss = 0.0063, Val loss = 0.0101
Epoch 36: Train loss = 0.0062, Val loss = 0.0102
Epoch 37: Train loss = 0.0062, Val loss = 0.0101
Epoch 38: Train loss = 0.0063, Val loss = 0.0104
Epoch 39: Train loss = 0.0062, Val loss = 0.0100
Epoch 40: Train loss = 0.0062, Val loss = 0.0102
Epoch 41: Train loss = 0.0062, Val loss = 0.0103
Epoch 42: Train loss = 0.0063, Val loss = 0.0103
Epoch 43: Train loss = 0.0062, Val loss = 0.0103
Epoch 44: Train loss = 0.0062, Val loss = 0.0102
Epoch 45: Train loss = 0.0062, Val loss = 0.0099
Epoch 46: Train loss = 0.0062, Val loss = 0.0104
Epoch 47: Train loss = 0.0062, Val loss = 0.0100
Epoch 48: Train loss = 0.0062, Val loss = 0.0100
Epoch 49: Train loss = 0.0062, Val loss = 0.0105
Epoch 50: Train loss = 0.0061, Val loss = 0.0099
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0250, Val loss = 0.0248
Epoch 2: Train loss = 0.0092, Val loss = 0.0068
Epoch 3: Train loss = 0.0076, Val loss = 0.0116
Epoch 4: Train loss = 0.0069, Val loss = 0.0085
Epoch 5: Train loss = 0.0066, Val loss = 0.0099
Epoch 6: Train loss = 0.0065, Val loss = 0.0103
Epoch 7: Train loss = 0.0064, Val loss = 0.0099
Epoch 8: Train loss = 0.0064, Val loss = 0.0103
Epoch 9: Train loss = 0.0064, Val loss = 0.0104
Epoch 10: Train loss = 0.0063, Val loss = 0.0098
Epoch 11: Train loss = 0.0063, Val loss = 0.0101
Epoch 12: Train loss = 0.0062, Val loss = 0.0098
Epoch 13: Train loss = 0.0062, Val loss = 0.0111
Epoch 14: Train loss = 0.0061, Val loss = 0.0113
Epoch 15: Train loss = 0.0061, Val loss = 0.0090
Epoch 16: Train loss = 0.0061, Val loss = 0.0106
Epoch 17: Train loss = 0.0061, Val loss = 0.0113
Epoch 18: Train loss = 0.0060, Val loss = 0.0093
Epoch 19: Train loss = 0.0060, Val loss = 0.0091
Epoch 20: Train loss = 0.0059, Val loss = 0.0105
Epoch 21: Train loss = 0.0059, Val loss = 0.0087
Epoch 22: Train loss = 0.0058, Val loss = 0.0093
Epoch 23: Train loss = 0.0058, Val loss = 0.0085
Epoch 24: Train loss = 0.0058, Val loss = 0.0073
Epoch 25: Train loss = 0.0058, Val loss = 0.0086
Epoch 26: Train loss = 0.0057, Val loss = 0.0084
Epoch 27: Train loss = 0.0057, Val loss = 0.0068
Epoch 28: Train loss = 0.0058, Val loss = 0.0084
Epoch 29: Train loss = 0.0056, Val loss = 0.0091
Epoch 30: Train loss = 0.0056, Val loss = 0.0075
Epoch 31: Train loss = 0.0055, Val loss = 0.0072
Epoch 32: Train loss = 0.0054, Val loss = 0.0077
Epoch 33: Train loss = 0.0054, Val loss = 0.0068
Epoch 34: Train loss = 0.0054, Val loss = 0.0060
Epoch 35: Train loss = 0.0054, Val loss = 0.0093
Epoch 36: Train loss = 0.0054, Val loss = 0.0046
Epoch 37: Train loss = 0.0055, Val loss = 0.0079
Epoch 38: Train loss = 0.0053, Val loss = 0.0063
Epoch 39: Train loss = 0.0052, Val loss = 0.0061
Epoch 40: Train loss = 0.0052, Val loss = 0.0065
Epoch 41: Train loss = 0.0052, Val loss = 0.0065
Epoch 42: Train loss = 0.0051, Val loss = 0.0064
Epoch 43: Train loss = 0.0052, Val loss = 0.0051
Epoch 44: Train loss = 0.0051, Val loss = 0.0086
Epoch 45: Train loss = 0.0053, Val loss = 0.0047
Epoch 46: Train loss = 0.0051, Val loss = 0.0084
Epoch 47: Train loss = 0.0050, Val loss = 0.0051
Epoch 48: Train loss = 0.0049, Val loss = 0.0072
Epoch 49: Train loss = 0.0048, Val loss = 0.0062
Epoch 50: Train loss = 0.0049, Val loss = 0.0070
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0536, Val loss = 0.0334
Epoch 2: Train loss = 0.0511, Val loss = 0.0310
Epoch 3: Train loss = 0.0481, Val loss = 0.0279
Epoch 4: Train loss = 0.0444, Val loss = 0.0236
Epoch 5: Train loss = 0.0388, Val loss = 0.0179
Epoch 6: Train loss = 0.0313, Val loss = 0.0112
Epoch 7: Train loss = 0.0221, Val loss = 0.0064
Epoch 8: Train loss = 0.0135, Val loss = 0.0098
Epoch 9: Train loss = 0.0102, Val loss = 0.0174
Epoch 10: Train loss = 0.0091, Val loss = 0.0136
Epoch 11: Train loss = 0.0075, Val loss = 0.0085
Epoch 12: Train loss = 0.0072, Val loss = 0.0073
Epoch 13: Train loss = 0.0072, Val loss = 0.0085
Epoch 14: Train loss = 0.0070, Val loss = 0.0100
Epoch 15: Train loss = 0.0068, Val loss = 0.0099
Epoch 16: Train loss = 0.0067, Val loss = 0.0088
Epoch 17: Train loss = 0.0067, Val loss = 0.0085
Epoch 18: Train loss = 0.0066, Val loss = 0.0092
Epoch 19: Train loss = 0.0066, Val loss = 0.0094
Epoch 20: Train loss = 0.0066, Val loss = 0.0090
Epoch 21: Train loss = 0.0066, Val loss = 0.0090
Epoch 22: Train loss = 0.0065, Val loss = 0.0092
Epoch 23: Train loss = 0.0064, Val loss = 0.0097
Epoch 24: Train loss = 0.0065, Val loss = 0.0088
Epoch 25: Train loss = 0.0064, Val loss = 0.0091
Epoch 26: Train loss = 0.0065, Val loss = 0.0094
Epoch 27: Train loss = 0.0064, Val loss = 0.0097
Epoch 28: Train loss = 0.0064, Val loss = 0.0093
Epoch 29: Train loss = 0.0064, Val loss = 0.0093
Epoch 30: Train loss = 0.0064, Val loss = 0.0098
Epoch 31: Train loss = 0.0064, Val loss = 0.0096
Epoch 32: Train loss = 0.0064, Val loss = 0.0094
Epoch 33: Train loss = 0.0064, Val loss = 0.0099
Epoch 34: Train loss = 0.0064, Val loss = 0.0095
Epoch 35: Train loss = 0.0063, Val loss = 0.0097
Epoch 36: Train loss = 0.0063, Val loss = 0.0098
Epoch 37: Train loss = 0.0064, Val loss = 0.0094
Epoch 38: Train loss = 0.0063, Val loss = 0.0102
Epoch 39: Train loss = 0.0063, Val loss = 0.0097
Epoch 40: Train loss = 0.0063, Val loss = 0.0097
Epoch 41: Train loss = 0.0063, Val loss = 0.0103
Epoch 42: Train loss = 0.0063, Val loss = 0.0100
Epoch 43: Train loss = 0.0063, Val loss = 0.0097
Epoch 44: Train loss = 0.0063, Val loss = 0.0101
Epoch 45: Train loss = 0.0063, Val loss = 0.0098
Epoch 46: Train loss = 0.0063, Val loss = 0.0101
Epoch 47: Train loss = 0.0063, Val loss = 0.0099
Epoch 48: Train loss = 0.0063, Val loss = 0.0100
Epoch 49: Train loss = 0.0062, Val loss = 0.0102
Epoch 50: Train loss = 0.0062, Val loss = 0.0099
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0414, Val loss = 0.0092
Epoch 2: Train loss = 0.0125, Val loss = 0.0054
Epoch 3: Train loss = 0.0094, Val loss = 0.0100
Epoch 4: Train loss = 0.0079, Val loss = 0.0072
Epoch 5: Train loss = 0.0070, Val loss = 0.0096
Epoch 6: Train loss = 0.0068, Val loss = 0.0081
Epoch 7: Train loss = 0.0066, Val loss = 0.0101
Epoch 8: Train loss = 0.0065, Val loss = 0.0082
Epoch 9: Train loss = 0.0065, Val loss = 0.0105
Epoch 10: Train loss = 0.0064, Val loss = 0.0090
Epoch 11: Train loss = 0.0064, Val loss = 0.0100
Epoch 12: Train loss = 0.0064, Val loss = 0.0106
Epoch 13: Train loss = 0.0064, Val loss = 0.0102
Epoch 14: Train loss = 0.0063, Val loss = 0.0095
Epoch 15: Train loss = 0.0064, Val loss = 0.0098
Epoch 16: Train loss = 0.0063, Val loss = 0.0109
Epoch 17: Train loss = 0.0063, Val loss = 0.0090
Epoch 18: Train loss = 0.0063, Val loss = 0.0092
Epoch 19: Train loss = 0.0062, Val loss = 0.0116
Epoch 20: Train loss = 0.0062, Val loss = 0.0102
Epoch 21: Train loss = 0.0062, Val loss = 0.0079
Epoch 22: Train loss = 0.0062, Val loss = 0.0095
Epoch 23: Train loss = 0.0063, Val loss = 0.0124
Epoch 24: Train loss = 0.0062, Val loss = 0.0107
Epoch 25: Train loss = 0.0061, Val loss = 0.0086
Epoch 26: Train loss = 0.0061, Val loss = 0.0094
Epoch 27: Train loss = 0.0061, Val loss = 0.0115
Epoch 28: Train loss = 0.0060, Val loss = 0.0109
Epoch 29: Train loss = 0.0061, Val loss = 0.0074
Epoch 30: Train loss = 0.0060, Val loss = 0.0088
Epoch 31: Train loss = 0.0060, Val loss = 0.0101
Epoch 32: Train loss = 0.0060, Val loss = 0.0106
Epoch 33: Train loss = 0.0059, Val loss = 0.0105
Epoch 34: Train loss = 0.0059, Val loss = 0.0087
Epoch 35: Train loss = 0.0058, Val loss = 0.0098
Epoch 36: Train loss = 0.0058, Val loss = 0.0082
Epoch 37: Train loss = 0.0058, Val loss = 0.0101
Epoch 38: Train loss = 0.0058, Val loss = 0.0091
Epoch 39: Train loss = 0.0057, Val loss = 0.0089
Epoch 40: Train loss = 0.0056, Val loss = 0.0079
Epoch 41: Train loss = 0.0056, Val loss = 0.0074
Epoch 42: Train loss = 0.0056, Val loss = 0.0090
Epoch 43: Train loss = 0.0055, Val loss = 0.0063
Epoch 44: Train loss = 0.0055, Val loss = 0.0073
Epoch 45: Train loss = 0.0054, Val loss = 0.0058
Epoch 46: Train loss = 0.0056, Val loss = 0.0065
Epoch 47: Train loss = 0.0054, Val loss = 0.0063
Epoch 48: Train loss = 0.0053, Val loss = 0.0056
Epoch 49: Train loss = 0.0052, Val loss = 0.0058
Epoch 50: Train loss = 0.0052, Val loss = 0.0057
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0520, Val loss = 0.0323
Epoch 2: Train loss = 0.0495, Val loss = 0.0298
Epoch 3: Train loss = 0.0465, Val loss = 0.0268
Epoch 4: Train loss = 0.0425, Val loss = 0.0231
Epoch 5: Train loss = 0.0377, Val loss = 0.0181
Epoch 6: Train loss = 0.0309, Val loss = 0.0123
Epoch 7: Train loss = 0.0225, Val loss = 0.0085
Epoch 8: Train loss = 0.0150, Val loss = 0.0108
Epoch 9: Train loss = 0.0112, Val loss = 0.0147
Epoch 10: Train loss = 0.0091, Val loss = 0.0116
Epoch 11: Train loss = 0.0077, Val loss = 0.0082
Epoch 12: Train loss = 0.0072, Val loss = 0.0078
Epoch 13: Train loss = 0.0071, Val loss = 0.0089
Epoch 14: Train loss = 0.0070, Val loss = 0.0096
Epoch 15: Train loss = 0.0068, Val loss = 0.0090
Epoch 16: Train loss = 0.0068, Val loss = 0.0084
Epoch 17: Train loss = 0.0067, Val loss = 0.0088
Epoch 18: Train loss = 0.0067, Val loss = 0.0091
Epoch 19: Train loss = 0.0066, Val loss = 0.0091
Epoch 20: Train loss = 0.0066, Val loss = 0.0088
Epoch 21: Train loss = 0.0065, Val loss = 0.0089
Epoch 22: Train loss = 0.0066, Val loss = 0.0091
Epoch 23: Train loss = 0.0065, Val loss = 0.0091
Epoch 24: Train loss = 0.0065, Val loss = 0.0092
Epoch 25: Train loss = 0.0065, Val loss = 0.0092
Epoch 26: Train loss = 0.0064, Val loss = 0.0096
Epoch 27: Train loss = 0.0065, Val loss = 0.0093
Epoch 28: Train loss = 0.0064, Val loss = 0.0095
Epoch 29: Train loss = 0.0064, Val loss = 0.0095
Epoch 30: Train loss = 0.0064, Val loss = 0.0094
Epoch 31: Train loss = 0.0064, Val loss = 0.0097
Epoch 32: Train loss = 0.0064, Val loss = 0.0098
Epoch 33: Train loss = 0.0064, Val loss = 0.0095
Epoch 34: Train loss = 0.0064, Val loss = 0.0098
Epoch 35: Train loss = 0.0064, Val loss = 0.0100
Epoch 36: Train loss = 0.0064, Val loss = 0.0099
Epoch 37: Train loss = 0.0063, Val loss = 0.0100
Epoch 38: Train loss = 0.0064, Val loss = 0.0099
Epoch 39: Train loss = 0.0064, Val loss = 0.0102
Epoch 40: Train loss = 0.0064, Val loss = 0.0100
Epoch 41: Train loss = 0.0064, Val loss = 0.0100
Epoch 42: Train loss = 0.0063, Val loss = 0.0104
Epoch 43: Train loss = 0.0063, Val loss = 0.0101
Epoch 44: Train loss = 0.0063, Val loss = 0.0099
Epoch 45: Train loss = 0.0064, Val loss = 0.0105
Epoch 46: Train loss = 0.0063, Val loss = 0.0105
Epoch 47: Train loss = 0.0063, Val loss = 0.0100
Epoch 48: Train loss = 0.0063, Val loss = 0.0109
Epoch 49: Train loss = 0.0063, Val loss = 0.0105
Epoch 50: Train loss = 0.0062, Val loss = 0.0103
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0342, Val loss = 0.0093
Epoch 2: Train loss = 0.0113, Val loss = 0.0045
Epoch 3: Train loss = 0.0086, Val loss = 0.0112
Epoch 4: Train loss = 0.0075, Val loss = 0.0064
Epoch 5: Train loss = 0.0068, Val loss = 0.0102
Epoch 6: Train loss = 0.0066, Val loss = 0.0079
Epoch 7: Train loss = 0.0065, Val loss = 0.0097
Epoch 8: Train loss = 0.0064, Val loss = 0.0091
Epoch 9: Train loss = 0.0065, Val loss = 0.0106
Epoch 10: Train loss = 0.0064, Val loss = 0.0101
Epoch 11: Train loss = 0.0064, Val loss = 0.0095
Epoch 12: Train loss = 0.0064, Val loss = 0.0105
Epoch 13: Train loss = 0.0063, Val loss = 0.0106
Epoch 14: Train loss = 0.0064, Val loss = 0.0126
Epoch 15: Train loss = 0.0063, Val loss = 0.0073
Epoch 16: Train loss = 0.0064, Val loss = 0.0123
Epoch 17: Train loss = 0.0063, Val loss = 0.0110
Epoch 18: Train loss = 0.0063, Val loss = 0.0103
Epoch 19: Train loss = 0.0063, Val loss = 0.0076
Epoch 20: Train loss = 0.0063, Val loss = 0.0099
Epoch 21: Train loss = 0.0062, Val loss = 0.0114
Epoch 22: Train loss = 0.0061, Val loss = 0.0106
Epoch 23: Train loss = 0.0061, Val loss = 0.0100
Epoch 24: Train loss = 0.0061, Val loss = 0.0101
Epoch 25: Train loss = 0.0061, Val loss = 0.0097
Epoch 26: Train loss = 0.0060, Val loss = 0.0109
Epoch 27: Train loss = 0.0060, Val loss = 0.0116
Epoch 28: Train loss = 0.0060, Val loss = 0.0109
Epoch 29: Train loss = 0.0060, Val loss = 0.0105
Epoch 30: Train loss = 0.0060, Val loss = 0.0107
Epoch 31: Train loss = 0.0061, Val loss = 0.0101
Epoch 32: Train loss = 0.0059, Val loss = 0.0097
Epoch 33: Train loss = 0.0058, Val loss = 0.0090
Epoch 34: Train loss = 0.0058, Val loss = 0.0079
Epoch 35: Train loss = 0.0058, Val loss = 0.0082
Epoch 36: Train loss = 0.0058, Val loss = 0.0093
Epoch 37: Train loss = 0.0058, Val loss = 0.0079
Epoch 38: Train loss = 0.0057, Val loss = 0.0091
Epoch 39: Train loss = 0.0057, Val loss = 0.0087
Epoch 40: Train loss = 0.0057, Val loss = 0.0056
Epoch 41: Train loss = 0.0058, Val loss = 0.0044
Epoch 42: Train loss = 0.0058, Val loss = 0.0049
Epoch 43: Train loss = 0.0057, Val loss = 0.0053
Epoch 44: Train loss = 0.0057, Val loss = 0.0072
Epoch 45: Train loss = 0.0056, Val loss = 0.0079
Epoch 46: Train loss = 0.0055, Val loss = 0.0086
Epoch 47: Train loss = 0.0056, Val loss = 0.0088
Epoch 48: Train loss = 0.0054, Val loss = 0.0049
Epoch 49: Train loss = 0.0055, Val loss = 0.0052
Epoch 50: Train loss = 0.0053, Val loss = 0.0053
Epoch 51: Train loss = 0.0054, Val loss = 0.0061
Early stopping triggered at epoch 51.
Using device: mps
Epoch 1: Train loss = 0.0592, Val loss = 0.0387
Epoch 2: Train loss = 0.0561, Val loss = 0.0364
Epoch 3: Train loss = 0.0543, Val loss = 0.0348
Epoch 4: Train loss = 0.0526, Val loss = 0.0331
Epoch 5: Train loss = 0.0508, Val loss = 0.0312
Epoch 6: Train loss = 0.0484, Val loss = 0.0289
Epoch 7: Train loss = 0.0460, Val loss = 0.0265
Epoch 8: Train loss = 0.0434, Val loss = 0.0242
Epoch 9: Train loss = 0.0407, Val loss = 0.0220
Epoch 10: Train loss = 0.0381, Val loss = 0.0199
Epoch 11: Train loss = 0.0357, Val loss = 0.0181
Epoch 12: Train loss = 0.0333, Val loss = 0.0165
Epoch 13: Train loss = 0.0314, Val loss = 0.0151
Epoch 14: Train loss = 0.0294, Val loss = 0.0139
Epoch 15: Train loss = 0.0278, Val loss = 0.0127
Epoch 16: Train loss = 0.0262, Val loss = 0.0118
Epoch 17: Train loss = 0.0248, Val loss = 0.0108
Epoch 18: Train loss = 0.0235, Val loss = 0.0099
Epoch 19: Train loss = 0.0224, Val loss = 0.0092
Epoch 20: Train loss = 0.0214, Val loss = 0.0087
Epoch 21: Train loss = 0.0202, Val loss = 0.0082
Epoch 22: Train loss = 0.0199, Val loss = 0.0077
Epoch 23: Train loss = 0.0189, Val loss = 0.0073
Epoch 24: Train loss = 0.0181, Val loss = 0.0069
Epoch 25: Train loss = 0.0175, Val loss = 0.0065
Epoch 26: Train loss = 0.0169, Val loss = 0.0062
Epoch 27: Train loss = 0.0164, Val loss = 0.0061
Epoch 28: Train loss = 0.0159, Val loss = 0.0059
Epoch 29: Train loss = 0.0153, Val loss = 0.0058
Epoch 30: Train loss = 0.0150, Val loss = 0.0057
Epoch 31: Train loss = 0.0147, Val loss = 0.0056
Epoch 32: Train loss = 0.0143, Val loss = 0.0056
Epoch 33: Train loss = 0.0142, Val loss = 0.0055
Epoch 34: Train loss = 0.0136, Val loss = 0.0055
Epoch 35: Train loss = 0.0134, Val loss = 0.0055
Epoch 36: Train loss = 0.0130, Val loss = 0.0057
Epoch 37: Train loss = 0.0131, Val loss = 0.0059
Epoch 38: Train loss = 0.0129, Val loss = 0.0059
Epoch 39: Train loss = 0.0125, Val loss = 0.0059
Epoch 40: Train loss = 0.0122, Val loss = 0.0059
Epoch 41: Train loss = 0.0124, Val loss = 0.0059
Epoch 42: Train loss = 0.0123, Val loss = 0.0059
Epoch 43: Train loss = 0.0123, Val loss = 0.0060
Epoch 44: Train loss = 0.0118, Val loss = 0.0058
Epoch 45: Train loss = 0.0116, Val loss = 0.0057
Epoch 46: Train loss = 0.0115, Val loss = 0.0060
Epoch 47: Train loss = 0.0116, Val loss = 0.0060
Epoch 48: Train loss = 0.0113, Val loss = 0.0060
Epoch 49: Train loss = 0.0112, Val loss = 0.0060
Epoch 50: Train loss = 0.0114, Val loss = 0.0059
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0314, Val loss = 0.0147
Epoch 2: Train loss = 0.0234, Val loss = 0.0103
Epoch 3: Train loss = 0.0174, Val loss = 0.0076
Epoch 4: Train loss = 0.0138, Val loss = 0.0065
Epoch 5: Train loss = 0.0113, Val loss = 0.0056
Epoch 6: Train loss = 0.0100, Val loss = 0.0056
Epoch 7: Train loss = 0.0095, Val loss = 0.0057
Epoch 8: Train loss = 0.0090, Val loss = 0.0061
Epoch 9: Train loss = 0.0085, Val loss = 0.0053
Epoch 10: Train loss = 0.0084, Val loss = 0.0073
Epoch 11: Train loss = 0.0080, Val loss = 0.0063
Epoch 12: Train loss = 0.0080, Val loss = 0.0065
Epoch 13: Train loss = 0.0077, Val loss = 0.0072
Epoch 14: Train loss = 0.0077, Val loss = 0.0059
Epoch 15: Train loss = 0.0077, Val loss = 0.0059
Epoch 16: Train loss = 0.0076, Val loss = 0.0067
Epoch 17: Train loss = 0.0076, Val loss = 0.0088
Epoch 18: Train loss = 0.0074, Val loss = 0.0078
Epoch 19: Train loss = 0.0073, Val loss = 0.0059
Epoch 20: Train loss = 0.0072, Val loss = 0.0072
Epoch 21: Train loss = 0.0071, Val loss = 0.0070
Epoch 22: Train loss = 0.0071, Val loss = 0.0072
Epoch 23: Train loss = 0.0070, Val loss = 0.0071
Epoch 24: Train loss = 0.0069, Val loss = 0.0064
Epoch 25: Train loss = 0.0069, Val loss = 0.0072
Epoch 26: Train loss = 0.0068, Val loss = 0.0073
Epoch 27: Train loss = 0.0067, Val loss = 0.0064
Epoch 28: Train loss = 0.0067, Val loss = 0.0076
Epoch 29: Train loss = 0.0067, Val loss = 0.0077
Epoch 30: Train loss = 0.0065, Val loss = 0.0049
Epoch 31: Train loss = 0.0066, Val loss = 0.0052
Epoch 32: Train loss = 0.0064, Val loss = 0.0058
Epoch 33: Train loss = 0.0064, Val loss = 0.0061
Epoch 34: Train loss = 0.0064, Val loss = 0.0051
Epoch 35: Train loss = 0.0063, Val loss = 0.0068
Epoch 36: Train loss = 0.0065, Val loss = 0.0050
Epoch 37: Train loss = 0.0063, Val loss = 0.0050
Epoch 38: Train loss = 0.0062, Val loss = 0.0047
Epoch 39: Train loss = 0.0062, Val loss = 0.0061
Epoch 40: Train loss = 0.0062, Val loss = 0.0060
Epoch 41: Train loss = 0.0061, Val loss = 0.0083
Epoch 42: Train loss = 0.0063, Val loss = 0.0056
Epoch 43: Train loss = 0.0060, Val loss = 0.0049
Epoch 44: Train loss = 0.0062, Val loss = 0.0049
Epoch 45: Train loss = 0.0060, Val loss = 0.0063
Epoch 46: Train loss = 0.0060, Val loss = 0.0063
Epoch 47: Train loss = 0.0060, Val loss = 0.0072
Epoch 48: Train loss = 0.0059, Val loss = 0.0052
Epoch 49: Train loss = 0.0060, Val loss = 0.0048
Epoch 50: Train loss = 0.0060, Val loss = 0.0044
Epoch 51: Train loss = 0.0060, Val loss = 0.0045
Epoch 52: Train loss = 0.0059, Val loss = 0.0051
Epoch 53: Train loss = 0.0059, Val loss = 0.0054
Epoch 54: Train loss = 0.0058, Val loss = 0.0055
Epoch 55: Train loss = 0.0057, Val loss = 0.0055
Epoch 56: Train loss = 0.0059, Val loss = 0.0047
Epoch 57: Train loss = 0.0058, Val loss = 0.0062
Epoch 58: Train loss = 0.0058, Val loss = 0.0057
Epoch 59: Train loss = 0.0058, Val loss = 0.0059
Epoch 60: Train loss = 0.0057, Val loss = 0.0050
Early stopping triggered at epoch 60.
Using device: mps
Epoch 1: Train loss = 0.0572, Val loss = 0.0368
Epoch 2: Train loss = 0.0545, Val loss = 0.0343
Epoch 3: Train loss = 0.0524, Val loss = 0.0325
Epoch 4: Train loss = 0.0501, Val loss = 0.0309
Epoch 5: Train loss = 0.0480, Val loss = 0.0289
Epoch 6: Train loss = 0.0460, Val loss = 0.0268
Epoch 7: Train loss = 0.0435, Val loss = 0.0249
Epoch 8: Train loss = 0.0412, Val loss = 0.0233
Epoch 9: Train loss = 0.0394, Val loss = 0.0219
Epoch 10: Train loss = 0.0373, Val loss = 0.0206
Epoch 11: Train loss = 0.0355, Val loss = 0.0192
Epoch 12: Train loss = 0.0337, Val loss = 0.0178
Epoch 13: Train loss = 0.0316, Val loss = 0.0166
Epoch 14: Train loss = 0.0301, Val loss = 0.0154
Epoch 15: Train loss = 0.0283, Val loss = 0.0142
Epoch 16: Train loss = 0.0265, Val loss = 0.0132
Epoch 17: Train loss = 0.0250, Val loss = 0.0122
Epoch 18: Train loss = 0.0235, Val loss = 0.0114
Epoch 19: Train loss = 0.0222, Val loss = 0.0107
Epoch 20: Train loss = 0.0211, Val loss = 0.0101
Epoch 21: Train loss = 0.0200, Val loss = 0.0094
Epoch 22: Train loss = 0.0186, Val loss = 0.0087
Epoch 23: Train loss = 0.0181, Val loss = 0.0081
Epoch 24: Train loss = 0.0171, Val loss = 0.0077
Epoch 25: Train loss = 0.0166, Val loss = 0.0074
Epoch 26: Train loss = 0.0162, Val loss = 0.0071
Epoch 27: Train loss = 0.0154, Val loss = 0.0070
Epoch 28: Train loss = 0.0149, Val loss = 0.0067
Epoch 29: Train loss = 0.0147, Val loss = 0.0068
Epoch 30: Train loss = 0.0144, Val loss = 0.0067
Epoch 31: Train loss = 0.0142, Val loss = 0.0065
Epoch 32: Train loss = 0.0135, Val loss = 0.0061
Epoch 33: Train loss = 0.0133, Val loss = 0.0062
Epoch 34: Train loss = 0.0133, Val loss = 0.0065
Epoch 35: Train loss = 0.0129, Val loss = 0.0065
Epoch 36: Train loss = 0.0126, Val loss = 0.0065
Epoch 37: Train loss = 0.0124, Val loss = 0.0062
Epoch 38: Train loss = 0.0124, Val loss = 0.0061
Epoch 39: Train loss = 0.0121, Val loss = 0.0064
Epoch 40: Train loss = 0.0120, Val loss = 0.0063
Epoch 41: Train loss = 0.0119, Val loss = 0.0064
Epoch 42: Train loss = 0.0117, Val loss = 0.0064
Epoch 43: Train loss = 0.0116, Val loss = 0.0063
Epoch 44: Train loss = 0.0115, Val loss = 0.0066
Epoch 45: Train loss = 0.0115, Val loss = 0.0065
Epoch 46: Train loss = 0.0114, Val loss = 0.0063
Epoch 47: Train loss = 0.0111, Val loss = 0.0065
Epoch 48: Train loss = 0.0112, Val loss = 0.0064
Epoch 49: Train loss = 0.0110, Val loss = 0.0063
Epoch 50: Train loss = 0.0109, Val loss = 0.0066
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0455, Val loss = 0.0170
Epoch 2: Train loss = 0.0284, Val loss = 0.0116
Epoch 3: Train loss = 0.0197, Val loss = 0.0066
Epoch 4: Train loss = 0.0149, Val loss = 0.0055
Epoch 5: Train loss = 0.0121, Val loss = 0.0057
Epoch 6: Train loss = 0.0108, Val loss = 0.0052
Epoch 7: Train loss = 0.0102, Val loss = 0.0063
Epoch 8: Train loss = 0.0097, Val loss = 0.0057
Epoch 9: Train loss = 0.0094, Val loss = 0.0069
Epoch 10: Train loss = 0.0090, Val loss = 0.0065
Epoch 11: Train loss = 0.0087, Val loss = 0.0078
Epoch 12: Train loss = 0.0084, Val loss = 0.0072
Epoch 13: Train loss = 0.0085, Val loss = 0.0070
Epoch 14: Train loss = 0.0083, Val loss = 0.0073
Epoch 15: Train loss = 0.0081, Val loss = 0.0070
Epoch 16: Train loss = 0.0079, Val loss = 0.0077
Epoch 17: Train loss = 0.0079, Val loss = 0.0096
Epoch 18: Train loss = 0.0077, Val loss = 0.0062
Epoch 19: Train loss = 0.0077, Val loss = 0.0054
Epoch 20: Train loss = 0.0077, Val loss = 0.0051
Epoch 21: Train loss = 0.0074, Val loss = 0.0047
Epoch 22: Train loss = 0.0074, Val loss = 0.0062
Epoch 23: Train loss = 0.0075, Val loss = 0.0063
Epoch 24: Train loss = 0.0073, Val loss = 0.0071
Epoch 25: Train loss = 0.0073, Val loss = 0.0079
Epoch 26: Train loss = 0.0072, Val loss = 0.0083
Epoch 27: Train loss = 0.0071, Val loss = 0.0088
Epoch 28: Train loss = 0.0071, Val loss = 0.0092
Epoch 29: Train loss = 0.0071, Val loss = 0.0082
Epoch 30: Train loss = 0.0069, Val loss = 0.0068
Epoch 31: Train loss = 0.0069, Val loss = 0.0057
Epoch 32: Train loss = 0.0069, Val loss = 0.0059
Epoch 33: Train loss = 0.0068, Val loss = 0.0065
Epoch 34: Train loss = 0.0069, Val loss = 0.0064
Epoch 35: Train loss = 0.0069, Val loss = 0.0064
Epoch 36: Train loss = 0.0069, Val loss = 0.0069
Epoch 37: Train loss = 0.0068, Val loss = 0.0071
Epoch 38: Train loss = 0.0066, Val loss = 0.0069
Epoch 39: Train loss = 0.0066, Val loss = 0.0057
Epoch 40: Train loss = 0.0066, Val loss = 0.0053
Epoch 41: Train loss = 0.0067, Val loss = 0.0062
Epoch 42: Train loss = 0.0065, Val loss = 0.0070
Epoch 43: Train loss = 0.0063, Val loss = 0.0062
Epoch 44: Train loss = 0.0065, Val loss = 0.0062
Epoch 45: Train loss = 0.0064, Val loss = 0.0060
Epoch 46: Train loss = 0.0063, Val loss = 0.0058
Epoch 47: Train loss = 0.0064, Val loss = 0.0054
Epoch 48: Train loss = 0.0063, Val loss = 0.0067
Epoch 49: Train loss = 0.0063, Val loss = 0.0066
Epoch 50: Train loss = 0.0062, Val loss = 0.0058
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0592, Val loss = 0.0389
Epoch 2: Train loss = 0.0578, Val loss = 0.0378
Epoch 3: Train loss = 0.0566, Val loss = 0.0369
Epoch 4: Train loss = 0.0556, Val loss = 0.0361
Epoch 5: Train loss = 0.0543, Val loss = 0.0352
Epoch 6: Train loss = 0.0533, Val loss = 0.0344
Epoch 7: Train loss = 0.0523, Val loss = 0.0335
Epoch 8: Train loss = 0.0515, Val loss = 0.0326
Epoch 9: Train loss = 0.0505, Val loss = 0.0316
Epoch 10: Train loss = 0.0492, Val loss = 0.0306
Epoch 11: Train loss = 0.0482, Val loss = 0.0294
Epoch 12: Train loss = 0.0466, Val loss = 0.0280
Epoch 13: Train loss = 0.0451, Val loss = 0.0264
Epoch 14: Train loss = 0.0429, Val loss = 0.0248
Epoch 15: Train loss = 0.0412, Val loss = 0.0232
Epoch 16: Train loss = 0.0390, Val loss = 0.0216
Epoch 17: Train loss = 0.0368, Val loss = 0.0201
Epoch 18: Train loss = 0.0346, Val loss = 0.0186
Epoch 19: Train loss = 0.0323, Val loss = 0.0172
Epoch 20: Train loss = 0.0304, Val loss = 0.0158
Epoch 21: Train loss = 0.0284, Val loss = 0.0145
Epoch 22: Train loss = 0.0265, Val loss = 0.0133
Epoch 23: Train loss = 0.0251, Val loss = 0.0121
Epoch 24: Train loss = 0.0236, Val loss = 0.0110
Epoch 25: Train loss = 0.0221, Val loss = 0.0100
Epoch 26: Train loss = 0.0209, Val loss = 0.0091
Epoch 27: Train loss = 0.0196, Val loss = 0.0083
Epoch 28: Train loss = 0.0187, Val loss = 0.0076
Epoch 29: Train loss = 0.0181, Val loss = 0.0070
Epoch 30: Train loss = 0.0168, Val loss = 0.0066
Epoch 31: Train loss = 0.0164, Val loss = 0.0062
Epoch 32: Train loss = 0.0159, Val loss = 0.0060
Epoch 33: Train loss = 0.0155, Val loss = 0.0057
Epoch 34: Train loss = 0.0148, Val loss = 0.0055
Epoch 35: Train loss = 0.0145, Val loss = 0.0053
Epoch 36: Train loss = 0.0141, Val loss = 0.0052
Epoch 37: Train loss = 0.0137, Val loss = 0.0051
Epoch 38: Train loss = 0.0133, Val loss = 0.0052
Epoch 39: Train loss = 0.0132, Val loss = 0.0052
Epoch 40: Train loss = 0.0129, Val loss = 0.0051
Epoch 41: Train loss = 0.0129, Val loss = 0.0051
Epoch 42: Train loss = 0.0128, Val loss = 0.0051
Epoch 43: Train loss = 0.0124, Val loss = 0.0049
Epoch 44: Train loss = 0.0124, Val loss = 0.0048
Epoch 45: Train loss = 0.0123, Val loss = 0.0049
Epoch 46: Train loss = 0.0121, Val loss = 0.0049
Epoch 47: Train loss = 0.0118, Val loss = 0.0050
Epoch 48: Train loss = 0.0119, Val loss = 0.0051
Epoch 49: Train loss = 0.0116, Val loss = 0.0051
Epoch 50: Train loss = 0.0116, Val loss = 0.0051
Epoch 51: Train loss = 0.0115, Val loss = 0.0052
Epoch 52: Train loss = 0.0114, Val loss = 0.0050
Epoch 53: Train loss = 0.0113, Val loss = 0.0052
Early stopping triggered at epoch 53.
Using device: mps
Epoch 1: Train loss = 0.0427, Val loss = 0.0226
Epoch 2: Train loss = 0.0333, Val loss = 0.0149
Epoch 3: Train loss = 0.0234, Val loss = 0.0115
Epoch 4: Train loss = 0.0167, Val loss = 0.0062
Epoch 5: Train loss = 0.0128, Val loss = 0.0064
Epoch 6: Train loss = 0.0113, Val loss = 0.0047
Epoch 7: Train loss = 0.0105, Val loss = 0.0054
Epoch 8: Train loss = 0.0099, Val loss = 0.0049
Epoch 9: Train loss = 0.0096, Val loss = 0.0059
Epoch 10: Train loss = 0.0094, Val loss = 0.0049
Epoch 11: Train loss = 0.0090, Val loss = 0.0066
Epoch 12: Train loss = 0.0089, Val loss = 0.0052
Epoch 13: Train loss = 0.0086, Val loss = 0.0059
Epoch 14: Train loss = 0.0084, Val loss = 0.0054
Epoch 15: Train loss = 0.0084, Val loss = 0.0064
Epoch 16: Train loss = 0.0081, Val loss = 0.0061
Epoch 17: Train loss = 0.0080, Val loss = 0.0048
Epoch 18: Train loss = 0.0079, Val loss = 0.0051
Epoch 19: Train loss = 0.0077, Val loss = 0.0065
Epoch 20: Train loss = 0.0077, Val loss = 0.0058
Epoch 21: Train loss = 0.0076, Val loss = 0.0050
Epoch 22: Train loss = 0.0073, Val loss = 0.0070
Epoch 23: Train loss = 0.0073, Val loss = 0.0071
Epoch 24: Train loss = 0.0073, Val loss = 0.0061
Epoch 25: Train loss = 0.0071, Val loss = 0.0057
Epoch 26: Train loss = 0.0071, Val loss = 0.0061
Epoch 27: Train loss = 0.0070, Val loss = 0.0060
Epoch 28: Train loss = 0.0070, Val loss = 0.0052
Epoch 29: Train loss = 0.0069, Val loss = 0.0053
Epoch 30: Train loss = 0.0069, Val loss = 0.0070
Epoch 31: Train loss = 0.0070, Val loss = 0.0075
Epoch 32: Train loss = 0.0068, Val loss = 0.0072
Epoch 33: Train loss = 0.0067, Val loss = 0.0067
Epoch 34: Train loss = 0.0067, Val loss = 0.0062
Epoch 35: Train loss = 0.0066, Val loss = 0.0051
Epoch 36: Train loss = 0.0065, Val loss = 0.0061
Epoch 37: Train loss = 0.0066, Val loss = 0.0067
Epoch 38: Train loss = 0.0064, Val loss = 0.0055
Epoch 39: Train loss = 0.0064, Val loss = 0.0057
Epoch 40: Train loss = 0.0064, Val loss = 0.0070
Epoch 41: Train loss = 0.0062, Val loss = 0.0048
Epoch 42: Train loss = 0.0062, Val loss = 0.0050
Epoch 43: Train loss = 0.0062, Val loss = 0.0049
Epoch 44: Train loss = 0.0061, Val loss = 0.0041
Epoch 45: Train loss = 0.0061, Val loss = 0.0044
Epoch 46: Train loss = 0.0061, Val loss = 0.0050
Epoch 47: Train loss = 0.0061, Val loss = 0.0051
Epoch 48: Train loss = 0.0060, Val loss = 0.0052
Epoch 49: Train loss = 0.0061, Val loss = 0.0051
Epoch 50: Train loss = 0.0062, Val loss = 0.0057
Epoch 51: Train loss = 0.0059, Val loss = 0.0050
Epoch 52: Train loss = 0.0058, Val loss = 0.0049
Epoch 53: Train loss = 0.0059, Val loss = 0.0050
Epoch 54: Train loss = 0.0058, Val loss = 0.0056
Early stopping triggered at epoch 54.
Using device: mps
Epoch 1: Train loss = 0.0476, Val loss = 0.0301
Epoch 2: Train loss = 0.0467, Val loss = 0.0292
Epoch 3: Train loss = 0.0459, Val loss = 0.0284
Epoch 4: Train loss = 0.0449, Val loss = 0.0276
Epoch 5: Train loss = 0.0440, Val loss = 0.0267
Epoch 6: Train loss = 0.0429, Val loss = 0.0258
Epoch 7: Train loss = 0.0419, Val loss = 0.0248
Epoch 8: Train loss = 0.0406, Val loss = 0.0237
Epoch 9: Train loss = 0.0392, Val loss = 0.0227
Epoch 10: Train loss = 0.0379, Val loss = 0.0217
Epoch 11: Train loss = 0.0361, Val loss = 0.0207
Epoch 12: Train loss = 0.0348, Val loss = 0.0196
Epoch 13: Train loss = 0.0333, Val loss = 0.0185
Epoch 14: Train loss = 0.0316, Val loss = 0.0173
Epoch 15: Train loss = 0.0301, Val loss = 0.0161
Epoch 16: Train loss = 0.0283, Val loss = 0.0149
Epoch 17: Train loss = 0.0268, Val loss = 0.0137
Epoch 18: Train loss = 0.0255, Val loss = 0.0126
Epoch 19: Train loss = 0.0240, Val loss = 0.0117
Epoch 20: Train loss = 0.0227, Val loss = 0.0110
Epoch 21: Train loss = 0.0215, Val loss = 0.0102
Epoch 22: Train loss = 0.0204, Val loss = 0.0095
Epoch 23: Train loss = 0.0192, Val loss = 0.0088
Epoch 24: Train loss = 0.0185, Val loss = 0.0082
Epoch 25: Train loss = 0.0174, Val loss = 0.0076
Epoch 26: Train loss = 0.0169, Val loss = 0.0072
Epoch 27: Train loss = 0.0163, Val loss = 0.0068
Epoch 28: Train loss = 0.0154, Val loss = 0.0065
Epoch 29: Train loss = 0.0150, Val loss = 0.0063
Epoch 30: Train loss = 0.0146, Val loss = 0.0061
Epoch 31: Train loss = 0.0141, Val loss = 0.0059
Epoch 32: Train loss = 0.0136, Val loss = 0.0058
Epoch 33: Train loss = 0.0131, Val loss = 0.0057
Epoch 34: Train loss = 0.0131, Val loss = 0.0055
Epoch 35: Train loss = 0.0128, Val loss = 0.0055
Epoch 36: Train loss = 0.0125, Val loss = 0.0055
Epoch 37: Train loss = 0.0124, Val loss = 0.0055
Epoch 38: Train loss = 0.0119, Val loss = 0.0055
Epoch 39: Train loss = 0.0116, Val loss = 0.0055
Epoch 40: Train loss = 0.0116, Val loss = 0.0055
Epoch 41: Train loss = 0.0115, Val loss = 0.0055
Epoch 42: Train loss = 0.0113, Val loss = 0.0055
Epoch 43: Train loss = 0.0112, Val loss = 0.0055
Epoch 44: Train loss = 0.0111, Val loss = 0.0057
Epoch 45: Train loss = 0.0111, Val loss = 0.0056
Epoch 46: Train loss = 0.0110, Val loss = 0.0056
Epoch 47: Train loss = 0.0108, Val loss = 0.0056
Epoch 48: Train loss = 0.0107, Val loss = 0.0057
Epoch 49: Train loss = 0.0107, Val loss = 0.0056
Epoch 50: Train loss = 0.0108, Val loss = 0.0059
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0517, Val loss = 0.0300
Epoch 2: Train loss = 0.0442, Val loss = 0.0222
Epoch 3: Train loss = 0.0343, Val loss = 0.0150
Epoch 4: Train loss = 0.0247, Val loss = 0.0123
Epoch 5: Train loss = 0.0183, Val loss = 0.0074
Epoch 6: Train loss = 0.0138, Val loss = 0.0066
Epoch 7: Train loss = 0.0116, Val loss = 0.0048
Epoch 8: Train loss = 0.0104, Val loss = 0.0050
Epoch 9: Train loss = 0.0097, Val loss = 0.0047
Epoch 10: Train loss = 0.0093, Val loss = 0.0051
Epoch 11: Train loss = 0.0091, Val loss = 0.0047
Epoch 12: Train loss = 0.0087, Val loss = 0.0059
Epoch 13: Train loss = 0.0086, Val loss = 0.0047
Epoch 14: Train loss = 0.0084, Val loss = 0.0051
Epoch 15: Train loss = 0.0084, Val loss = 0.0055
Epoch 16: Train loss = 0.0082, Val loss = 0.0048
Epoch 17: Train loss = 0.0082, Val loss = 0.0056
Epoch 18: Train loss = 0.0081, Val loss = 0.0053
Epoch 19: Train loss = 0.0080, Val loss = 0.0041
Epoch 20: Train loss = 0.0080, Val loss = 0.0061
Epoch 21: Train loss = 0.0079, Val loss = 0.0048
Epoch 22: Train loss = 0.0080, Val loss = 0.0044
Epoch 23: Train loss = 0.0077, Val loss = 0.0050
Epoch 24: Train loss = 0.0076, Val loss = 0.0063
Epoch 25: Train loss = 0.0077, Val loss = 0.0046
Epoch 26: Train loss = 0.0075, Val loss = 0.0053
Epoch 27: Train loss = 0.0074, Val loss = 0.0052
Epoch 28: Train loss = 0.0072, Val loss = 0.0054
Epoch 29: Train loss = 0.0074, Val loss = 0.0056
Epoch 30: Train loss = 0.0072, Val loss = 0.0055
Epoch 31: Train loss = 0.0072, Val loss = 0.0052
Epoch 32: Train loss = 0.0071, Val loss = 0.0049
Epoch 33: Train loss = 0.0071, Val loss = 0.0062
Epoch 34: Train loss = 0.0071, Val loss = 0.0070
Epoch 35: Train loss = 0.0070, Val loss = 0.0060
Epoch 36: Train loss = 0.0070, Val loss = 0.0055
Epoch 37: Train loss = 0.0068, Val loss = 0.0048
Epoch 38: Train loss = 0.0069, Val loss = 0.0046
Epoch 39: Train loss = 0.0070, Val loss = 0.0053
Epoch 40: Train loss = 0.0069, Val loss = 0.0061
Epoch 41: Train loss = 0.0069, Val loss = 0.0077
Epoch 42: Train loss = 0.0069, Val loss = 0.0070
Epoch 43: Train loss = 0.0067, Val loss = 0.0062
Epoch 44: Train loss = 0.0067, Val loss = 0.0048
Epoch 45: Train loss = 0.0066, Val loss = 0.0046
Epoch 46: Train loss = 0.0067, Val loss = 0.0049
Epoch 47: Train loss = 0.0064, Val loss = 0.0051
Epoch 48: Train loss = 0.0065, Val loss = 0.0063
Epoch 49: Train loss = 0.0064, Val loss = 0.0057
Epoch 50: Train loss = 0.0065, Val loss = 0.0059
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0451, Val loss = 0.0246
Epoch 2: Train loss = 0.0394, Val loss = 0.0193
Epoch 3: Train loss = 0.0335, Val loss = 0.0143
Epoch 4: Train loss = 0.0268, Val loss = 0.0099
Epoch 5: Train loss = 0.0204, Val loss = 0.0077
Epoch 6: Train loss = 0.0152, Val loss = 0.0085
Epoch 7: Train loss = 0.0124, Val loss = 0.0100
Epoch 8: Train loss = 0.0110, Val loss = 0.0093
Epoch 9: Train loss = 0.0103, Val loss = 0.0079
Epoch 10: Train loss = 0.0099, Val loss = 0.0070
Epoch 11: Train loss = 0.0097, Val loss = 0.0067
Epoch 12: Train loss = 0.0093, Val loss = 0.0069
Epoch 13: Train loss = 0.0090, Val loss = 0.0071
Epoch 14: Train loss = 0.0089, Val loss = 0.0073
Epoch 15: Train loss = 0.0088, Val loss = 0.0072
Epoch 16: Train loss = 0.0086, Val loss = 0.0073
Epoch 17: Train loss = 0.0085, Val loss = 0.0074
Epoch 18: Train loss = 0.0084, Val loss = 0.0076
Epoch 19: Train loss = 0.0084, Val loss = 0.0076
Epoch 20: Train loss = 0.0082, Val loss = 0.0081
Epoch 21: Train loss = 0.0081, Val loss = 0.0079
Epoch 22: Train loss = 0.0080, Val loss = 0.0080
Epoch 23: Train loss = 0.0079, Val loss = 0.0080
Epoch 24: Train loss = 0.0079, Val loss = 0.0082
Epoch 25: Train loss = 0.0079, Val loss = 0.0082
Epoch 26: Train loss = 0.0079, Val loss = 0.0082
Epoch 27: Train loss = 0.0078, Val loss = 0.0084
Epoch 28: Train loss = 0.0078, Val loss = 0.0089
Epoch 29: Train loss = 0.0076, Val loss = 0.0085
Epoch 30: Train loss = 0.0077, Val loss = 0.0086
Epoch 31: Train loss = 0.0076, Val loss = 0.0089
Epoch 32: Train loss = 0.0074, Val loss = 0.0087
Epoch 33: Train loss = 0.0076, Val loss = 0.0084
Epoch 34: Train loss = 0.0074, Val loss = 0.0090
Epoch 35: Train loss = 0.0074, Val loss = 0.0090
Epoch 36: Train loss = 0.0074, Val loss = 0.0086
Epoch 37: Train loss = 0.0074, Val loss = 0.0089
Epoch 38: Train loss = 0.0074, Val loss = 0.0087
Epoch 39: Train loss = 0.0072, Val loss = 0.0090
Epoch 40: Train loss = 0.0073, Val loss = 0.0087
Epoch 41: Train loss = 0.0072, Val loss = 0.0086
Epoch 42: Train loss = 0.0072, Val loss = 0.0092
Epoch 43: Train loss = 0.0070, Val loss = 0.0083
Epoch 44: Train loss = 0.0072, Val loss = 0.0089
Epoch 45: Train loss = 0.0071, Val loss = 0.0090
Epoch 46: Train loss = 0.0071, Val loss = 0.0085
Epoch 47: Train loss = 0.0070, Val loss = 0.0094
Epoch 48: Train loss = 0.0070, Val loss = 0.0083
Epoch 49: Train loss = 0.0069, Val loss = 0.0090
Epoch 50: Train loss = 0.0069, Val loss = 0.0082
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0263, Val loss = 0.0155
Epoch 2: Train loss = 0.0113, Val loss = 0.0056
Epoch 3: Train loss = 0.0094, Val loss = 0.0081
Epoch 4: Train loss = 0.0086, Val loss = 0.0077
Epoch 5: Train loss = 0.0082, Val loss = 0.0067
Epoch 6: Train loss = 0.0078, Val loss = 0.0108
Epoch 7: Train loss = 0.0077, Val loss = 0.0067
Epoch 8: Train loss = 0.0075, Val loss = 0.0097
Epoch 9: Train loss = 0.0075, Val loss = 0.0089
Epoch 10: Train loss = 0.0072, Val loss = 0.0081
Epoch 11: Train loss = 0.0071, Val loss = 0.0092
Epoch 12: Train loss = 0.0070, Val loss = 0.0080
Epoch 13: Train loss = 0.0068, Val loss = 0.0071
Epoch 14: Train loss = 0.0067, Val loss = 0.0072
Epoch 15: Train loss = 0.0069, Val loss = 0.0082
Epoch 16: Train loss = 0.0068, Val loss = 0.0101
Epoch 17: Train loss = 0.0068, Val loss = 0.0113
Epoch 18: Train loss = 0.0067, Val loss = 0.0097
Epoch 19: Train loss = 0.0066, Val loss = 0.0097
Epoch 20: Train loss = 0.0065, Val loss = 0.0086
Epoch 21: Train loss = 0.0064, Val loss = 0.0094
Epoch 22: Train loss = 0.0062, Val loss = 0.0056
Epoch 23: Train loss = 0.0063, Val loss = 0.0064
Epoch 24: Train loss = 0.0060, Val loss = 0.0067
Epoch 25: Train loss = 0.0061, Val loss = 0.0093
Epoch 26: Train loss = 0.0062, Val loss = 0.0073
Epoch 27: Train loss = 0.0062, Val loss = 0.0049
Epoch 28: Train loss = 0.0063, Val loss = 0.0064
Epoch 29: Train loss = 0.0060, Val loss = 0.0082
Epoch 30: Train loss = 0.0059, Val loss = 0.0091
Epoch 31: Train loss = 0.0058, Val loss = 0.0067
Epoch 32: Train loss = 0.0058, Val loss = 0.0042
Epoch 33: Train loss = 0.0058, Val loss = 0.0054
Epoch 34: Train loss = 0.0057, Val loss = 0.0058
Epoch 35: Train loss = 0.0056, Val loss = 0.0057
Epoch 36: Train loss = 0.0055, Val loss = 0.0062
Epoch 37: Train loss = 0.0055, Val loss = 0.0069
Epoch 38: Train loss = 0.0056, Val loss = 0.0066
Epoch 39: Train loss = 0.0055, Val loss = 0.0056
Epoch 40: Train loss = 0.0054, Val loss = 0.0064
Epoch 41: Train loss = 0.0054, Val loss = 0.0050
Epoch 42: Train loss = 0.0053, Val loss = 0.0075
Epoch 43: Train loss = 0.0054, Val loss = 0.0051
Epoch 44: Train loss = 0.0053, Val loss = 0.0076
Epoch 45: Train loss = 0.0053, Val loss = 0.0057
Epoch 46: Train loss = 0.0052, Val loss = 0.0068
Epoch 47: Train loss = 0.0053, Val loss = 0.0071
Epoch 48: Train loss = 0.0052, Val loss = 0.0043
Epoch 49: Train loss = 0.0052, Val loss = 0.0076
Epoch 50: Train loss = 0.0051, Val loss = 0.0093
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0476, Val loss = 0.0257
Epoch 2: Train loss = 0.0407, Val loss = 0.0201
Epoch 3: Train loss = 0.0341, Val loss = 0.0143
Epoch 4: Train loss = 0.0272, Val loss = 0.0098
Epoch 5: Train loss = 0.0202, Val loss = 0.0086
Epoch 6: Train loss = 0.0151, Val loss = 0.0103
Epoch 7: Train loss = 0.0123, Val loss = 0.0114
Epoch 8: Train loss = 0.0107, Val loss = 0.0102
Epoch 9: Train loss = 0.0099, Val loss = 0.0085
Epoch 10: Train loss = 0.0094, Val loss = 0.0076
Epoch 11: Train loss = 0.0092, Val loss = 0.0076
Epoch 12: Train loss = 0.0090, Val loss = 0.0081
Epoch 13: Train loss = 0.0088, Val loss = 0.0082
Epoch 14: Train loss = 0.0087, Val loss = 0.0079
Epoch 15: Train loss = 0.0085, Val loss = 0.0080
Epoch 16: Train loss = 0.0083, Val loss = 0.0081
Epoch 17: Train loss = 0.0082, Val loss = 0.0081
Epoch 18: Train loss = 0.0083, Val loss = 0.0080
Epoch 19: Train loss = 0.0082, Val loss = 0.0082
Epoch 20: Train loss = 0.0081, Val loss = 0.0084
Epoch 21: Train loss = 0.0080, Val loss = 0.0082
Epoch 22: Train loss = 0.0080, Val loss = 0.0080
Epoch 23: Train loss = 0.0079, Val loss = 0.0089
Epoch 24: Train loss = 0.0079, Val loss = 0.0087
Epoch 25: Train loss = 0.0078, Val loss = 0.0081
Epoch 26: Train loss = 0.0078, Val loss = 0.0086
Epoch 27: Train loss = 0.0078, Val loss = 0.0084
Epoch 28: Train loss = 0.0078, Val loss = 0.0084
Epoch 29: Train loss = 0.0077, Val loss = 0.0085
Epoch 30: Train loss = 0.0076, Val loss = 0.0089
Epoch 31: Train loss = 0.0075, Val loss = 0.0084
Epoch 32: Train loss = 0.0075, Val loss = 0.0083
Epoch 33: Train loss = 0.0076, Val loss = 0.0084
Epoch 34: Train loss = 0.0075, Val loss = 0.0089
Epoch 35: Train loss = 0.0075, Val loss = 0.0083
Epoch 36: Train loss = 0.0074, Val loss = 0.0087
Epoch 37: Train loss = 0.0073, Val loss = 0.0087
Epoch 38: Train loss = 0.0074, Val loss = 0.0084
Epoch 39: Train loss = 0.0073, Val loss = 0.0093
Epoch 40: Train loss = 0.0072, Val loss = 0.0083
Epoch 41: Train loss = 0.0073, Val loss = 0.0085
Epoch 42: Train loss = 0.0072, Val loss = 0.0091
Epoch 43: Train loss = 0.0072, Val loss = 0.0084
Epoch 44: Train loss = 0.0071, Val loss = 0.0089
Epoch 45: Train loss = 0.0070, Val loss = 0.0086
Epoch 46: Train loss = 0.0071, Val loss = 0.0085
Epoch 47: Train loss = 0.0071, Val loss = 0.0088
Epoch 48: Train loss = 0.0070, Val loss = 0.0088
Epoch 49: Train loss = 0.0070, Val loss = 0.0082
Epoch 50: Train loss = 0.0070, Val loss = 0.0089
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0350, Val loss = 0.0247
Epoch 2: Train loss = 0.0129, Val loss = 0.0051
Epoch 3: Train loss = 0.0106, Val loss = 0.0112
Epoch 4: Train loss = 0.0089, Val loss = 0.0058
Epoch 5: Train loss = 0.0083, Val loss = 0.0083
Epoch 6: Train loss = 0.0079, Val loss = 0.0066
Epoch 7: Train loss = 0.0077, Val loss = 0.0078
Epoch 8: Train loss = 0.0076, Val loss = 0.0093
Epoch 9: Train loss = 0.0075, Val loss = 0.0090
Epoch 10: Train loss = 0.0073, Val loss = 0.0059
Epoch 11: Train loss = 0.0072, Val loss = 0.0086
Epoch 12: Train loss = 0.0071, Val loss = 0.0102
Epoch 13: Train loss = 0.0069, Val loss = 0.0076
Epoch 14: Train loss = 0.0069, Val loss = 0.0072
Epoch 15: Train loss = 0.0068, Val loss = 0.0066
Epoch 16: Train loss = 0.0067, Val loss = 0.0065
Epoch 17: Train loss = 0.0067, Val loss = 0.0068
Epoch 18: Train loss = 0.0066, Val loss = 0.0067
Epoch 19: Train loss = 0.0065, Val loss = 0.0069
Epoch 20: Train loss = 0.0065, Val loss = 0.0057
Epoch 21: Train loss = 0.0064, Val loss = 0.0056
Epoch 22: Train loss = 0.0063, Val loss = 0.0054
Epoch 23: Train loss = 0.0064, Val loss = 0.0101
Epoch 24: Train loss = 0.0063, Val loss = 0.0082
Epoch 25: Train loss = 0.0063, Val loss = 0.0054
Epoch 26: Train loss = 0.0063, Val loss = 0.0048
Epoch 27: Train loss = 0.0063, Val loss = 0.0056
Epoch 28: Train loss = 0.0062, Val loss = 0.0077
Epoch 29: Train loss = 0.0061, Val loss = 0.0102
Epoch 30: Train loss = 0.0062, Val loss = 0.0089
Epoch 31: Train loss = 0.0062, Val loss = 0.0054
Epoch 32: Train loss = 0.0059, Val loss = 0.0055
Epoch 33: Train loss = 0.0059, Val loss = 0.0061
Epoch 34: Train loss = 0.0059, Val loss = 0.0077
Epoch 35: Train loss = 0.0058, Val loss = 0.0049
Epoch 36: Train loss = 0.0058, Val loss = 0.0044
Epoch 37: Train loss = 0.0060, Val loss = 0.0103
Epoch 38: Train loss = 0.0060, Val loss = 0.0072
Epoch 39: Train loss = 0.0058, Val loss = 0.0050
Epoch 40: Train loss = 0.0056, Val loss = 0.0039
Epoch 41: Train loss = 0.0057, Val loss = 0.0047
Epoch 42: Train loss = 0.0056, Val loss = 0.0052
Epoch 43: Train loss = 0.0055, Val loss = 0.0053
Epoch 44: Train loss = 0.0055, Val loss = 0.0060
Epoch 45: Train loss = 0.0054, Val loss = 0.0049
Epoch 46: Train loss = 0.0053, Val loss = 0.0046
Epoch 47: Train loss = 0.0053, Val loss = 0.0053
Epoch 48: Train loss = 0.0054, Val loss = 0.0041
Epoch 49: Train loss = 0.0053, Val loss = 0.0057
Epoch 50: Train loss = 0.0052, Val loss = 0.0057
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0483, Val loss = 0.0291
Epoch 2: Train loss = 0.0459, Val loss = 0.0266
Epoch 3: Train loss = 0.0426, Val loss = 0.0234
Epoch 4: Train loss = 0.0386, Val loss = 0.0194
Epoch 5: Train loss = 0.0334, Val loss = 0.0146
Epoch 6: Train loss = 0.0268, Val loss = 0.0100
Epoch 7: Train loss = 0.0200, Val loss = 0.0092
Epoch 8: Train loss = 0.0153, Val loss = 0.0121
Epoch 9: Train loss = 0.0127, Val loss = 0.0103
Epoch 10: Train loss = 0.0106, Val loss = 0.0070
Epoch 11: Train loss = 0.0095, Val loss = 0.0062
Epoch 12: Train loss = 0.0092, Val loss = 0.0070
Epoch 13: Train loss = 0.0090, Val loss = 0.0074
Epoch 14: Train loss = 0.0088, Val loss = 0.0069
Epoch 15: Train loss = 0.0086, Val loss = 0.0065
Epoch 16: Train loss = 0.0085, Val loss = 0.0068
Epoch 17: Train loss = 0.0083, Val loss = 0.0070
Epoch 18: Train loss = 0.0083, Val loss = 0.0068
Epoch 19: Train loss = 0.0081, Val loss = 0.0067
Epoch 20: Train loss = 0.0081, Val loss = 0.0072
Epoch 21: Train loss = 0.0081, Val loss = 0.0070
Epoch 22: Train loss = 0.0080, Val loss = 0.0070
Epoch 23: Train loss = 0.0080, Val loss = 0.0069
Epoch 24: Train loss = 0.0080, Val loss = 0.0069
Epoch 25: Train loss = 0.0080, Val loss = 0.0077
Epoch 26: Train loss = 0.0078, Val loss = 0.0073
Epoch 27: Train loss = 0.0078, Val loss = 0.0075
Epoch 28: Train loss = 0.0077, Val loss = 0.0075
Epoch 29: Train loss = 0.0077, Val loss = 0.0078
Epoch 30: Train loss = 0.0076, Val loss = 0.0074
Epoch 31: Train loss = 0.0077, Val loss = 0.0073
Epoch 32: Train loss = 0.0076, Val loss = 0.0080
Epoch 33: Train loss = 0.0075, Val loss = 0.0074
Epoch 34: Train loss = 0.0075, Val loss = 0.0075
Epoch 35: Train loss = 0.0075, Val loss = 0.0076
Epoch 36: Train loss = 0.0075, Val loss = 0.0074
Epoch 37: Train loss = 0.0075, Val loss = 0.0077
Epoch 38: Train loss = 0.0074, Val loss = 0.0076
Epoch 39: Train loss = 0.0073, Val loss = 0.0076
Epoch 40: Train loss = 0.0074, Val loss = 0.0074
Epoch 41: Train loss = 0.0073, Val loss = 0.0078
Epoch 42: Train loss = 0.0072, Val loss = 0.0070
Epoch 43: Train loss = 0.0073, Val loss = 0.0080
Epoch 44: Train loss = 0.0072, Val loss = 0.0072
Epoch 45: Train loss = 0.0072, Val loss = 0.0078
Epoch 46: Train loss = 0.0072, Val loss = 0.0075
Epoch 47: Train loss = 0.0071, Val loss = 0.0077
Epoch 48: Train loss = 0.0072, Val loss = 0.0076
Epoch 49: Train loss = 0.0071, Val loss = 0.0076
Epoch 50: Train loss = 0.0071, Val loss = 0.0074
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0440, Val loss = 0.0086
Epoch 2: Train loss = 0.0149, Val loss = 0.0043
Epoch 3: Train loss = 0.0111, Val loss = 0.0087
Epoch 4: Train loss = 0.0091, Val loss = 0.0048
Epoch 5: Train loss = 0.0083, Val loss = 0.0082
Epoch 6: Train loss = 0.0080, Val loss = 0.0057
Epoch 7: Train loss = 0.0076, Val loss = 0.0070
Epoch 8: Train loss = 0.0076, Val loss = 0.0078
Epoch 9: Train loss = 0.0075, Val loss = 0.0065
Epoch 10: Train loss = 0.0074, Val loss = 0.0079
Epoch 11: Train loss = 0.0073, Val loss = 0.0073
Epoch 12: Train loss = 0.0071, Val loss = 0.0081
Epoch 13: Train loss = 0.0071, Val loss = 0.0074
Epoch 14: Train loss = 0.0071, Val loss = 0.0075
Epoch 15: Train loss = 0.0070, Val loss = 0.0064
Epoch 16: Train loss = 0.0070, Val loss = 0.0063
Epoch 17: Train loss = 0.0068, Val loss = 0.0060
Epoch 18: Train loss = 0.0068, Val loss = 0.0061
Epoch 19: Train loss = 0.0068, Val loss = 0.0070
Epoch 20: Train loss = 0.0067, Val loss = 0.0092
Epoch 21: Train loss = 0.0067, Val loss = 0.0090
Epoch 22: Train loss = 0.0066, Val loss = 0.0094
Epoch 23: Train loss = 0.0066, Val loss = 0.0062
Epoch 24: Train loss = 0.0066, Val loss = 0.0073
Epoch 25: Train loss = 0.0065, Val loss = 0.0066
Epoch 26: Train loss = 0.0063, Val loss = 0.0088
Epoch 27: Train loss = 0.0064, Val loss = 0.0062
Epoch 28: Train loss = 0.0062, Val loss = 0.0069
Epoch 29: Train loss = 0.0062, Val loss = 0.0083
Epoch 30: Train loss = 0.0063, Val loss = 0.0071
Epoch 31: Train loss = 0.0062, Val loss = 0.0052
Epoch 32: Train loss = 0.0061, Val loss = 0.0055
Epoch 33: Train loss = 0.0061, Val loss = 0.0064
Epoch 34: Train loss = 0.0060, Val loss = 0.0041
Epoch 35: Train loss = 0.0060, Val loss = 0.0075
Epoch 36: Train loss = 0.0060, Val loss = 0.0067
Epoch 37: Train loss = 0.0061, Val loss = 0.0048
Epoch 38: Train loss = 0.0059, Val loss = 0.0052
Epoch 39: Train loss = 0.0058, Val loss = 0.0068
Epoch 40: Train loss = 0.0059, Val loss = 0.0053
Epoch 41: Train loss = 0.0055, Val loss = 0.0055
Epoch 42: Train loss = 0.0055, Val loss = 0.0047
Epoch 43: Train loss = 0.0055, Val loss = 0.0053
Epoch 44: Train loss = 0.0055, Val loss = 0.0065
Epoch 45: Train loss = 0.0055, Val loss = 0.0039
Epoch 46: Train loss = 0.0056, Val loss = 0.0062
Epoch 47: Train loss = 0.0054, Val loss = 0.0076
Epoch 48: Train loss = 0.0053, Val loss = 0.0062
Epoch 49: Train loss = 0.0053, Val loss = 0.0078
Epoch 50: Train loss = 0.0051, Val loss = 0.0056
Epoch 51: Train loss = 0.0051, Val loss = 0.0063
Epoch 52: Train loss = 0.0051, Val loss = 0.0077
Epoch 53: Train loss = 0.0050, Val loss = 0.0060
Epoch 54: Train loss = 0.0049, Val loss = 0.0068
Epoch 55: Train loss = 0.0050, Val loss = 0.0087
Early stopping triggered at epoch 55.
Using device: mps
Epoch 1: Train loss = 0.0425, Val loss = 0.0245
Epoch 2: Train loss = 0.0402, Val loss = 0.0225
Epoch 3: Train loss = 0.0375, Val loss = 0.0198
Epoch 4: Train loss = 0.0338, Val loss = 0.0161
Epoch 5: Train loss = 0.0286, Val loss = 0.0116
Epoch 6: Train loss = 0.0219, Val loss = 0.0087
Epoch 7: Train loss = 0.0160, Val loss = 0.0108
Epoch 8: Train loss = 0.0132, Val loss = 0.0127
Epoch 9: Train loss = 0.0114, Val loss = 0.0088
Epoch 10: Train loss = 0.0097, Val loss = 0.0062
Epoch 11: Train loss = 0.0091, Val loss = 0.0060
Epoch 12: Train loss = 0.0088, Val loss = 0.0069
Epoch 13: Train loss = 0.0085, Val loss = 0.0074
Epoch 14: Train loss = 0.0084, Val loss = 0.0067
Epoch 15: Train loss = 0.0083, Val loss = 0.0062
Epoch 16: Train loss = 0.0082, Val loss = 0.0066
Epoch 17: Train loss = 0.0081, Val loss = 0.0070
Epoch 18: Train loss = 0.0079, Val loss = 0.0067
Epoch 19: Train loss = 0.0079, Val loss = 0.0066
Epoch 20: Train loss = 0.0080, Val loss = 0.0068
Epoch 21: Train loss = 0.0078, Val loss = 0.0072
Epoch 22: Train loss = 0.0078, Val loss = 0.0072
Epoch 23: Train loss = 0.0077, Val loss = 0.0067
Epoch 24: Train loss = 0.0076, Val loss = 0.0071
Epoch 25: Train loss = 0.0077, Val loss = 0.0075
Epoch 26: Train loss = 0.0075, Val loss = 0.0075
Epoch 27: Train loss = 0.0076, Val loss = 0.0070
Epoch 28: Train loss = 0.0075, Val loss = 0.0076
Epoch 29: Train loss = 0.0075, Val loss = 0.0076
Epoch 30: Train loss = 0.0075, Val loss = 0.0071
Epoch 31: Train loss = 0.0075, Val loss = 0.0078
Epoch 32: Train loss = 0.0073, Val loss = 0.0079
Epoch 33: Train loss = 0.0073, Val loss = 0.0075
Epoch 34: Train loss = 0.0073, Val loss = 0.0079
Epoch 35: Train loss = 0.0073, Val loss = 0.0077
Epoch 36: Train loss = 0.0072, Val loss = 0.0077
Epoch 37: Train loss = 0.0073, Val loss = 0.0080
Epoch 38: Train loss = 0.0071, Val loss = 0.0076
Epoch 39: Train loss = 0.0071, Val loss = 0.0079
Epoch 40: Train loss = 0.0071, Val loss = 0.0076
Epoch 41: Train loss = 0.0072, Val loss = 0.0078
Epoch 42: Train loss = 0.0071, Val loss = 0.0079
Epoch 43: Train loss = 0.0070, Val loss = 0.0076
Epoch 44: Train loss = 0.0071, Val loss = 0.0079
Epoch 45: Train loss = 0.0071, Val loss = 0.0076
Epoch 46: Train loss = 0.0070, Val loss = 0.0076
Epoch 47: Train loss = 0.0069, Val loss = 0.0078
Epoch 48: Train loss = 0.0070, Val loss = 0.0074
Epoch 49: Train loss = 0.0068, Val loss = 0.0079
Epoch 50: Train loss = 0.0069, Val loss = 0.0070
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0402, Val loss = 0.0096
Epoch 2: Train loss = 0.0143, Val loss = 0.0039
Epoch 3: Train loss = 0.0099, Val loss = 0.0087
Epoch 4: Train loss = 0.0088, Val loss = 0.0050
Epoch 5: Train loss = 0.0078, Val loss = 0.0077
Epoch 6: Train loss = 0.0076, Val loss = 0.0059
Epoch 7: Train loss = 0.0075, Val loss = 0.0076
Epoch 8: Train loss = 0.0074, Val loss = 0.0072
Epoch 9: Train loss = 0.0073, Val loss = 0.0067
Epoch 10: Train loss = 0.0071, Val loss = 0.0071
Epoch 11: Train loss = 0.0071, Val loss = 0.0087
Epoch 12: Train loss = 0.0071, Val loss = 0.0062
Epoch 13: Train loss = 0.0071, Val loss = 0.0057
Epoch 14: Train loss = 0.0068, Val loss = 0.0070
Epoch 15: Train loss = 0.0069, Val loss = 0.0095
Epoch 16: Train loss = 0.0067, Val loss = 0.0093
Epoch 17: Train loss = 0.0068, Val loss = 0.0076
Epoch 18: Train loss = 0.0068, Val loss = 0.0057
Epoch 19: Train loss = 0.0066, Val loss = 0.0051
Epoch 20: Train loss = 0.0067, Val loss = 0.0069
Epoch 21: Train loss = 0.0067, Val loss = 0.0091
Epoch 22: Train loss = 0.0065, Val loss = 0.0078
Epoch 23: Train loss = 0.0063, Val loss = 0.0060
Epoch 24: Train loss = 0.0064, Val loss = 0.0062
Epoch 25: Train loss = 0.0063, Val loss = 0.0069
Epoch 26: Train loss = 0.0062, Val loss = 0.0075
Epoch 27: Train loss = 0.0063, Val loss = 0.0063
Epoch 28: Train loss = 0.0062, Val loss = 0.0058
Epoch 29: Train loss = 0.0059, Val loss = 0.0074
Epoch 30: Train loss = 0.0059, Val loss = 0.0059
Epoch 31: Train loss = 0.0058, Val loss = 0.0044
Epoch 32: Train loss = 0.0059, Val loss = 0.0074
Epoch 33: Train loss = 0.0058, Val loss = 0.0048
Epoch 34: Train loss = 0.0057, Val loss = 0.0064
Epoch 35: Train loss = 0.0059, Val loss = 0.0046
Epoch 36: Train loss = 0.0058, Val loss = 0.0061
Epoch 37: Train loss = 0.0056, Val loss = 0.0071
Epoch 38: Train loss = 0.0055, Val loss = 0.0045
Epoch 39: Train loss = 0.0054, Val loss = 0.0065
Epoch 40: Train loss = 0.0054, Val loss = 0.0050
Epoch 41: Train loss = 0.0053, Val loss = 0.0060
Epoch 42: Train loss = 0.0053, Val loss = 0.0045
Epoch 43: Train loss = 0.0054, Val loss = 0.0049
Epoch 44: Train loss = 0.0051, Val loss = 0.0066
Epoch 45: Train loss = 0.0049, Val loss = 0.0064
Epoch 46: Train loss = 0.0050, Val loss = 0.0051
Epoch 47: Train loss = 0.0049, Val loss = 0.0061
Epoch 48: Train loss = 0.0048, Val loss = 0.0075
Epoch 49: Train loss = 0.0048, Val loss = 0.0064
Epoch 50: Train loss = 0.0047, Val loss = 0.0067
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0484, Val loss = 0.0301
Epoch 2: Train loss = 0.0469, Val loss = 0.0286
Epoch 3: Train loss = 0.0451, Val loss = 0.0274
Epoch 4: Train loss = 0.0435, Val loss = 0.0264
Epoch 5: Train loss = 0.0422, Val loss = 0.0256
Epoch 6: Train loss = 0.0409, Val loss = 0.0249
Epoch 7: Train loss = 0.0399, Val loss = 0.0244
Epoch 8: Train loss = 0.0388, Val loss = 0.0239
Epoch 9: Train loss = 0.0380, Val loss = 0.0234
Epoch 10: Train loss = 0.0370, Val loss = 0.0230
Epoch 11: Train loss = 0.0362, Val loss = 0.0226
Epoch 12: Train loss = 0.0353, Val loss = 0.0222
Epoch 13: Train loss = 0.0345, Val loss = 0.0218
Epoch 14: Train loss = 0.0336, Val loss = 0.0214
Epoch 15: Train loss = 0.0328, Val loss = 0.0210
Epoch 16: Train loss = 0.0319, Val loss = 0.0206
Epoch 17: Train loss = 0.0312, Val loss = 0.0201
Epoch 18: Train loss = 0.0305, Val loss = 0.0197
Epoch 19: Train loss = 0.0297, Val loss = 0.0193
Epoch 20: Train loss = 0.0289, Val loss = 0.0188
Epoch 21: Train loss = 0.0282, Val loss = 0.0183
Epoch 22: Train loss = 0.0274, Val loss = 0.0178
Epoch 23: Train loss = 0.0265, Val loss = 0.0172
Epoch 24: Train loss = 0.0255, Val loss = 0.0167
Epoch 25: Train loss = 0.0245, Val loss = 0.0161
Epoch 26: Train loss = 0.0236, Val loss = 0.0157
Epoch 27: Train loss = 0.0226, Val loss = 0.0154
Epoch 28: Train loss = 0.0217, Val loss = 0.0152
Epoch 29: Train loss = 0.0207, Val loss = 0.0150
Epoch 30: Train loss = 0.0199, Val loss = 0.0149
Epoch 31: Train loss = 0.0192, Val loss = 0.0149
Epoch 32: Train loss = 0.0184, Val loss = 0.0150
Epoch 33: Train loss = 0.0178, Val loss = 0.0150
Epoch 34: Train loss = 0.0172, Val loss = 0.0149
Epoch 35: Train loss = 0.0166, Val loss = 0.0147
Epoch 36: Train loss = 0.0161, Val loss = 0.0145
Epoch 37: Train loss = 0.0156, Val loss = 0.0142
Epoch 38: Train loss = 0.0151, Val loss = 0.0139
Epoch 39: Train loss = 0.0146, Val loss = 0.0135
Epoch 40: Train loss = 0.0141, Val loss = 0.0132
Epoch 41: Train loss = 0.0137, Val loss = 0.0129
Epoch 42: Train loss = 0.0132, Val loss = 0.0127
Epoch 43: Train loss = 0.0128, Val loss = 0.0124
Epoch 44: Train loss = 0.0125, Val loss = 0.0123
Epoch 45: Train loss = 0.0121, Val loss = 0.0121
Epoch 46: Train loss = 0.0117, Val loss = 0.0120
Epoch 47: Train loss = 0.0114, Val loss = 0.0118
Epoch 48: Train loss = 0.0111, Val loss = 0.0117
Epoch 49: Train loss = 0.0108, Val loss = 0.0116
Epoch 50: Train loss = 0.0106, Val loss = 0.0115
Epoch 51: Train loss = 0.0103, Val loss = 0.0114
Epoch 52: Train loss = 0.0100, Val loss = 0.0113
Epoch 53: Train loss = 0.0098, Val loss = 0.0112
Epoch 54: Train loss = 0.0096, Val loss = 0.0111
Epoch 55: Train loss = 0.0094, Val loss = 0.0110
Epoch 56: Train loss = 0.0092, Val loss = 0.0109
Epoch 57: Train loss = 0.0091, Val loss = 0.0108
Epoch 58: Train loss = 0.0089, Val loss = 0.0107
Epoch 59: Train loss = 0.0087, Val loss = 0.0106
Epoch 60: Train loss = 0.0086, Val loss = 0.0106
Epoch 61: Train loss = 0.0085, Val loss = 0.0105
Epoch 62: Train loss = 0.0083, Val loss = 0.0105
Epoch 63: Train loss = 0.0082, Val loss = 0.0104
Epoch 64: Train loss = 0.0081, Val loss = 0.0104
Epoch 65: Train loss = 0.0080, Val loss = 0.0104
Epoch 66: Train loss = 0.0079, Val loss = 0.0103
Epoch 67: Train loss = 0.0078, Val loss = 0.0103
Epoch 68: Train loss = 0.0077, Val loss = 0.0102
Epoch 69: Train loss = 0.0076, Val loss = 0.0102
Epoch 70: Train loss = 0.0075, Val loss = 0.0101
Epoch 71: Train loss = 0.0075, Val loss = 0.0101
Epoch 72: Train loss = 0.0074, Val loss = 0.0101
Epoch 73: Train loss = 0.0073, Val loss = 0.0101
Epoch 74: Train loss = 0.0073, Val loss = 0.0101
Epoch 75: Train loss = 0.0072, Val loss = 0.0101
Epoch 76: Train loss = 0.0072, Val loss = 0.0101
Epoch 77: Train loss = 0.0071, Val loss = 0.0101
Epoch 78: Train loss = 0.0071, Val loss = 0.0101
Epoch 79: Train loss = 0.0070, Val loss = 0.0101
Epoch 80: Train loss = 0.0070, Val loss = 0.0101
Epoch 81: Train loss = 0.0069, Val loss = 0.0101
Epoch 82: Train loss = 0.0069, Val loss = 0.0101
Epoch 83: Train loss = 0.0069, Val loss = 0.0101
Epoch 84: Train loss = 0.0068, Val loss = 0.0101
Epoch 85: Train loss = 0.0068, Val loss = 0.0101
Epoch 86: Train loss = 0.0068, Val loss = 0.0101
Epoch 87: Train loss = 0.0068, Val loss = 0.0101
Early stopping triggered at epoch 87.
Using device: mps
Epoch 1: Train loss = 0.0544, Val loss = 0.0311
Epoch 2: Train loss = 0.0452, Val loss = 0.0249
Epoch 3: Train loss = 0.0370, Val loss = 0.0209
Epoch 4: Train loss = 0.0295, Val loss = 0.0185
Epoch 5: Train loss = 0.0230, Val loss = 0.0167
Epoch 6: Train loss = 0.0176, Val loss = 0.0152
Epoch 7: Train loss = 0.0135, Val loss = 0.0134
Epoch 8: Train loss = 0.0108, Val loss = 0.0118
Epoch 9: Train loss = 0.0093, Val loss = 0.0107
Epoch 10: Train loss = 0.0083, Val loss = 0.0096
Epoch 11: Train loss = 0.0076, Val loss = 0.0089
Epoch 12: Train loss = 0.0071, Val loss = 0.0091
Epoch 13: Train loss = 0.0069, Val loss = 0.0094
Epoch 14: Train loss = 0.0069, Val loss = 0.0091
Epoch 15: Train loss = 0.0068, Val loss = 0.0087
Epoch 16: Train loss = 0.0068, Val loss = 0.0087
Epoch 17: Train loss = 0.0067, Val loss = 0.0092
Epoch 18: Train loss = 0.0067, Val loss = 0.0094
Epoch 19: Train loss = 0.0067, Val loss = 0.0094
Epoch 20: Train loss = 0.0066, Val loss = 0.0093
Epoch 21: Train loss = 0.0066, Val loss = 0.0093
Epoch 22: Train loss = 0.0065, Val loss = 0.0092
Epoch 23: Train loss = 0.0065, Val loss = 0.0092
Epoch 24: Train loss = 0.0065, Val loss = 0.0094
Epoch 25: Train loss = 0.0065, Val loss = 0.0095
Epoch 26: Train loss = 0.0065, Val loss = 0.0095
Epoch 27: Train loss = 0.0065, Val loss = 0.0091
Epoch 28: Train loss = 0.0065, Val loss = 0.0094
Epoch 29: Train loss = 0.0064, Val loss = 0.0098
Epoch 30: Train loss = 0.0064, Val loss = 0.0096
Epoch 31: Train loss = 0.0064, Val loss = 0.0093
Epoch 32: Train loss = 0.0064, Val loss = 0.0094
Epoch 33: Train loss = 0.0064, Val loss = 0.0095
Epoch 34: Train loss = 0.0064, Val loss = 0.0096
Epoch 35: Train loss = 0.0064, Val loss = 0.0097
Epoch 36: Train loss = 0.0064, Val loss = 0.0097
Epoch 37: Train loss = 0.0064, Val loss = 0.0094
Epoch 38: Train loss = 0.0064, Val loss = 0.0095
Epoch 39: Train loss = 0.0063, Val loss = 0.0097
Epoch 40: Train loss = 0.0064, Val loss = 0.0096
Epoch 41: Train loss = 0.0063, Val loss = 0.0096
Epoch 42: Train loss = 0.0063, Val loss = 0.0095
Epoch 43: Train loss = 0.0063, Val loss = 0.0094
Epoch 44: Train loss = 0.0063, Val loss = 0.0098
Epoch 45: Train loss = 0.0063, Val loss = 0.0098
Epoch 46: Train loss = 0.0062, Val loss = 0.0096
Epoch 47: Train loss = 0.0062, Val loss = 0.0095
Epoch 48: Train loss = 0.0063, Val loss = 0.0095
Epoch 49: Train loss = 0.0062, Val loss = 0.0096
Epoch 50: Train loss = 0.0062, Val loss = 0.0098
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0640, Val loss = 0.0436
Epoch 2: Train loss = 0.0619, Val loss = 0.0419
Epoch 3: Train loss = 0.0604, Val loss = 0.0406
Epoch 4: Train loss = 0.0592, Val loss = 0.0395
Epoch 5: Train loss = 0.0580, Val loss = 0.0384
Epoch 6: Train loss = 0.0567, Val loss = 0.0373
Epoch 7: Train loss = 0.0552, Val loss = 0.0361
Epoch 8: Train loss = 0.0537, Val loss = 0.0348
Epoch 9: Train loss = 0.0522, Val loss = 0.0336
Epoch 10: Train loss = 0.0505, Val loss = 0.0324
Epoch 11: Train loss = 0.0490, Val loss = 0.0312
Epoch 12: Train loss = 0.0474, Val loss = 0.0300
Epoch 13: Train loss = 0.0458, Val loss = 0.0290
Epoch 14: Train loss = 0.0442, Val loss = 0.0282
Epoch 15: Train loss = 0.0426, Val loss = 0.0275
Epoch 16: Train loss = 0.0412, Val loss = 0.0269
Epoch 17: Train loss = 0.0399, Val loss = 0.0263
Epoch 18: Train loss = 0.0386, Val loss = 0.0259
Epoch 19: Train loss = 0.0375, Val loss = 0.0254
Epoch 20: Train loss = 0.0364, Val loss = 0.0250
Epoch 21: Train loss = 0.0353, Val loss = 0.0245
Epoch 22: Train loss = 0.0342, Val loss = 0.0239
Epoch 23: Train loss = 0.0333, Val loss = 0.0234
Epoch 24: Train loss = 0.0322, Val loss = 0.0228
Epoch 25: Train loss = 0.0312, Val loss = 0.0222
Epoch 26: Train loss = 0.0302, Val loss = 0.0216
Epoch 27: Train loss = 0.0292, Val loss = 0.0210
Epoch 28: Train loss = 0.0282, Val loss = 0.0204
Epoch 29: Train loss = 0.0272, Val loss = 0.0197
Epoch 30: Train loss = 0.0263, Val loss = 0.0190
Epoch 31: Train loss = 0.0253, Val loss = 0.0184
Epoch 32: Train loss = 0.0243, Val loss = 0.0177
Epoch 33: Train loss = 0.0233, Val loss = 0.0171
Epoch 34: Train loss = 0.0224, Val loss = 0.0165
Epoch 35: Train loss = 0.0215, Val loss = 0.0159
Epoch 36: Train loss = 0.0206, Val loss = 0.0153
Epoch 37: Train loss = 0.0198, Val loss = 0.0149
Epoch 38: Train loss = 0.0190, Val loss = 0.0144
Epoch 39: Train loss = 0.0182, Val loss = 0.0140
Epoch 40: Train loss = 0.0174, Val loss = 0.0135
Epoch 41: Train loss = 0.0166, Val loss = 0.0131
Epoch 42: Train loss = 0.0158, Val loss = 0.0127
Epoch 43: Train loss = 0.0152, Val loss = 0.0123
Epoch 44: Train loss = 0.0145, Val loss = 0.0120
Epoch 45: Train loss = 0.0139, Val loss = 0.0117
Epoch 46: Train loss = 0.0133, Val loss = 0.0114
Epoch 47: Train loss = 0.0127, Val loss = 0.0111
Epoch 48: Train loss = 0.0122, Val loss = 0.0108
Epoch 49: Train loss = 0.0117, Val loss = 0.0106
Epoch 50: Train loss = 0.0113, Val loss = 0.0103
Epoch 51: Train loss = 0.0108, Val loss = 0.0101
Epoch 52: Train loss = 0.0104, Val loss = 0.0099
Epoch 53: Train loss = 0.0101, Val loss = 0.0097
Epoch 54: Train loss = 0.0097, Val loss = 0.0096
Epoch 55: Train loss = 0.0094, Val loss = 0.0095
Epoch 56: Train loss = 0.0091, Val loss = 0.0094
Epoch 57: Train loss = 0.0088, Val loss = 0.0094
Epoch 58: Train loss = 0.0086, Val loss = 0.0093
Epoch 59: Train loss = 0.0084, Val loss = 0.0093
Epoch 60: Train loss = 0.0081, Val loss = 0.0093
Epoch 61: Train loss = 0.0080, Val loss = 0.0093
Epoch 62: Train loss = 0.0078, Val loss = 0.0093
Epoch 63: Train loss = 0.0077, Val loss = 0.0093
Epoch 64: Train loss = 0.0076, Val loss = 0.0093
Epoch 65: Train loss = 0.0075, Val loss = 0.0093
Epoch 66: Train loss = 0.0073, Val loss = 0.0093
Epoch 67: Train loss = 0.0073, Val loss = 0.0093
Epoch 68: Train loss = 0.0072, Val loss = 0.0093
Early stopping triggered at epoch 68.
Using device: mps
Epoch 1: Train loss = 0.0399, Val loss = 0.0214
Epoch 2: Train loss = 0.0312, Val loss = 0.0199
Epoch 3: Train loss = 0.0252, Val loss = 0.0151
Epoch 4: Train loss = 0.0200, Val loss = 0.0139
Epoch 5: Train loss = 0.0165, Val loss = 0.0143
Epoch 6: Train loss = 0.0138, Val loss = 0.0135
Epoch 7: Train loss = 0.0114, Val loss = 0.0111
Epoch 8: Train loss = 0.0096, Val loss = 0.0100
Epoch 9: Train loss = 0.0084, Val loss = 0.0101
Epoch 10: Train loss = 0.0078, Val loss = 0.0095
Epoch 11: Train loss = 0.0074, Val loss = 0.0088
Epoch 12: Train loss = 0.0070, Val loss = 0.0090
Epoch 13: Train loss = 0.0069, Val loss = 0.0092
Epoch 14: Train loss = 0.0067, Val loss = 0.0092
Epoch 15: Train loss = 0.0067, Val loss = 0.0095
Epoch 16: Train loss = 0.0067, Val loss = 0.0092
Epoch 17: Train loss = 0.0066, Val loss = 0.0090
Epoch 18: Train loss = 0.0066, Val loss = 0.0092
Epoch 19: Train loss = 0.0066, Val loss = 0.0096
Epoch 20: Train loss = 0.0065, Val loss = 0.0092
Epoch 21: Train loss = 0.0065, Val loss = 0.0095
Epoch 22: Train loss = 0.0065, Val loss = 0.0092
Epoch 23: Train loss = 0.0064, Val loss = 0.0095
Epoch 24: Train loss = 0.0064, Val loss = 0.0097
Epoch 25: Train loss = 0.0064, Val loss = 0.0093
Epoch 26: Train loss = 0.0064, Val loss = 0.0099
Epoch 27: Train loss = 0.0063, Val loss = 0.0097
Epoch 28: Train loss = 0.0064, Val loss = 0.0094
Epoch 29: Train loss = 0.0063, Val loss = 0.0104
Epoch 30: Train loss = 0.0063, Val loss = 0.0095
Epoch 31: Train loss = 0.0063, Val loss = 0.0099
Epoch 32: Train loss = 0.0063, Val loss = 0.0101
Epoch 33: Train loss = 0.0063, Val loss = 0.0096
Epoch 34: Train loss = 0.0063, Val loss = 0.0104
Epoch 35: Train loss = 0.0062, Val loss = 0.0097
Epoch 36: Train loss = 0.0062, Val loss = 0.0101
Epoch 37: Train loss = 0.0062, Val loss = 0.0096
Epoch 38: Train loss = 0.0062, Val loss = 0.0101
Epoch 39: Train loss = 0.0062, Val loss = 0.0097
Epoch 40: Train loss = 0.0061, Val loss = 0.0103
Epoch 41: Train loss = 0.0061, Val loss = 0.0097
Epoch 42: Train loss = 0.0061, Val loss = 0.0100
Epoch 43: Train loss = 0.0061, Val loss = 0.0100
Epoch 44: Train loss = 0.0061, Val loss = 0.0099
Epoch 45: Train loss = 0.0060, Val loss = 0.0103
Epoch 46: Train loss = 0.0060, Val loss = 0.0091
Epoch 47: Train loss = 0.0060, Val loss = 0.0101
Epoch 48: Train loss = 0.0060, Val loss = 0.0091
Epoch 49: Train loss = 0.0060, Val loss = 0.0102
Epoch 50: Train loss = 0.0059, Val loss = 0.0094
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0430, Val loss = 0.0266
Epoch 2: Train loss = 0.0423, Val loss = 0.0260
Epoch 3: Train loss = 0.0416, Val loss = 0.0254
Epoch 4: Train loss = 0.0409, Val loss = 0.0247
Epoch 5: Train loss = 0.0401, Val loss = 0.0240
Epoch 6: Train loss = 0.0393, Val loss = 0.0233
Epoch 7: Train loss = 0.0385, Val loss = 0.0226
Epoch 8: Train loss = 0.0377, Val loss = 0.0220
Epoch 9: Train loss = 0.0370, Val loss = 0.0215
Epoch 10: Train loss = 0.0362, Val loss = 0.0209
Epoch 11: Train loss = 0.0355, Val loss = 0.0204
Epoch 12: Train loss = 0.0349, Val loss = 0.0199
Epoch 13: Train loss = 0.0342, Val loss = 0.0194
Epoch 14: Train loss = 0.0334, Val loss = 0.0188
Epoch 15: Train loss = 0.0328, Val loss = 0.0183
Epoch 16: Train loss = 0.0320, Val loss = 0.0178
Epoch 17: Train loss = 0.0313, Val loss = 0.0172
Epoch 18: Train loss = 0.0305, Val loss = 0.0167
Epoch 19: Train loss = 0.0297, Val loss = 0.0162
Epoch 20: Train loss = 0.0289, Val loss = 0.0157
Epoch 21: Train loss = 0.0282, Val loss = 0.0152
Epoch 22: Train loss = 0.0273, Val loss = 0.0147
Epoch 23: Train loss = 0.0265, Val loss = 0.0142
Epoch 24: Train loss = 0.0255, Val loss = 0.0137
Epoch 25: Train loss = 0.0248, Val loss = 0.0133
Epoch 26: Train loss = 0.0239, Val loss = 0.0129
Epoch 27: Train loss = 0.0231, Val loss = 0.0126
Epoch 28: Train loss = 0.0222, Val loss = 0.0122
Epoch 29: Train loss = 0.0214, Val loss = 0.0119
Epoch 30: Train loss = 0.0206, Val loss = 0.0117
Epoch 31: Train loss = 0.0199, Val loss = 0.0114
Epoch 32: Train loss = 0.0191, Val loss = 0.0112
Epoch 33: Train loss = 0.0184, Val loss = 0.0110
Epoch 34: Train loss = 0.0177, Val loss = 0.0109
Epoch 35: Train loss = 0.0170, Val loss = 0.0107
Epoch 36: Train loss = 0.0163, Val loss = 0.0106
Epoch 37: Train loss = 0.0158, Val loss = 0.0104
Epoch 38: Train loss = 0.0151, Val loss = 0.0102
Epoch 39: Train loss = 0.0146, Val loss = 0.0100
Epoch 40: Train loss = 0.0140, Val loss = 0.0098
Epoch 41: Train loss = 0.0135, Val loss = 0.0096
Epoch 42: Train loss = 0.0130, Val loss = 0.0094
Epoch 43: Train loss = 0.0125, Val loss = 0.0092
Epoch 44: Train loss = 0.0120, Val loss = 0.0090
Epoch 45: Train loss = 0.0116, Val loss = 0.0089
Epoch 46: Train loss = 0.0112, Val loss = 0.0088
Epoch 47: Train loss = 0.0108, Val loss = 0.0087
Epoch 48: Train loss = 0.0105, Val loss = 0.0086
Epoch 49: Train loss = 0.0102, Val loss = 0.0085
Epoch 50: Train loss = 0.0099, Val loss = 0.0084
Epoch 51: Train loss = 0.0096, Val loss = 0.0084
Epoch 52: Train loss = 0.0093, Val loss = 0.0083
Epoch 53: Train loss = 0.0091, Val loss = 0.0083
Epoch 54: Train loss = 0.0089, Val loss = 0.0083
Epoch 55: Train loss = 0.0087, Val loss = 0.0083
Epoch 56: Train loss = 0.0086, Val loss = 0.0082
Epoch 57: Train loss = 0.0084, Val loss = 0.0082
Epoch 58: Train loss = 0.0083, Val loss = 0.0082
Epoch 59: Train loss = 0.0081, Val loss = 0.0081
Epoch 60: Train loss = 0.0080, Val loss = 0.0081
Epoch 61: Train loss = 0.0080, Val loss = 0.0081
Epoch 62: Train loss = 0.0078, Val loss = 0.0081
Epoch 63: Train loss = 0.0077, Val loss = 0.0081
Epoch 64: Train loss = 0.0077, Val loss = 0.0081
Epoch 65: Train loss = 0.0076, Val loss = 0.0080
Epoch 66: Train loss = 0.0075, Val loss = 0.0080
Epoch 67: Train loss = 0.0074, Val loss = 0.0080
Epoch 68: Train loss = 0.0074, Val loss = 0.0080
Epoch 69: Train loss = 0.0073, Val loss = 0.0080
Epoch 70: Train loss = 0.0073, Val loss = 0.0080
Epoch 71: Train loss = 0.0072, Val loss = 0.0080
Early stopping triggered at epoch 71.
Using device: mps
Epoch 1: Train loss = 0.0545, Val loss = 0.0329
Epoch 2: Train loss = 0.0492, Val loss = 0.0289
Epoch 3: Train loss = 0.0443, Val loss = 0.0242
Epoch 4: Train loss = 0.0383, Val loss = 0.0191
Epoch 5: Train loss = 0.0311, Val loss = 0.0164
Epoch 6: Train loss = 0.0246, Val loss = 0.0192
Epoch 7: Train loss = 0.0208, Val loss = 0.0206
Epoch 8: Train loss = 0.0173, Val loss = 0.0156
Epoch 9: Train loss = 0.0136, Val loss = 0.0107
Epoch 10: Train loss = 0.0113, Val loss = 0.0082
Epoch 11: Train loss = 0.0098, Val loss = 0.0085
Epoch 12: Train loss = 0.0085, Val loss = 0.0103
Epoch 13: Train loss = 0.0077, Val loss = 0.0110
Epoch 14: Train loss = 0.0073, Val loss = 0.0094
Epoch 15: Train loss = 0.0070, Val loss = 0.0081
Epoch 16: Train loss = 0.0069, Val loss = 0.0083
Epoch 17: Train loss = 0.0069, Val loss = 0.0094
Epoch 18: Train loss = 0.0068, Val loss = 0.0093
Epoch 19: Train loss = 0.0068, Val loss = 0.0087
Epoch 20: Train loss = 0.0067, Val loss = 0.0087
Epoch 21: Train loss = 0.0066, Val loss = 0.0090
Epoch 22: Train loss = 0.0066, Val loss = 0.0091
Epoch 23: Train loss = 0.0066, Val loss = 0.0090
Epoch 24: Train loss = 0.0066, Val loss = 0.0090
Epoch 25: Train loss = 0.0065, Val loss = 0.0090
Epoch 26: Train loss = 0.0065, Val loss = 0.0091
Epoch 27: Train loss = 0.0065, Val loss = 0.0093
Epoch 28: Train loss = 0.0065, Val loss = 0.0094
Epoch 29: Train loss = 0.0065, Val loss = 0.0092
Epoch 30: Train loss = 0.0065, Val loss = 0.0095
Epoch 31: Train loss = 0.0065, Val loss = 0.0097
Epoch 32: Train loss = 0.0065, Val loss = 0.0094
Epoch 33: Train loss = 0.0065, Val loss = 0.0095
Epoch 34: Train loss = 0.0065, Val loss = 0.0096
Epoch 35: Train loss = 0.0065, Val loss = 0.0095
Epoch 36: Train loss = 0.0065, Val loss = 0.0095
Epoch 37: Train loss = 0.0065, Val loss = 0.0097
Epoch 38: Train loss = 0.0065, Val loss = 0.0096
Epoch 39: Train loss = 0.0064, Val loss = 0.0100
Epoch 40: Train loss = 0.0064, Val loss = 0.0096
Epoch 41: Train loss = 0.0064, Val loss = 0.0096
Epoch 42: Train loss = 0.0064, Val loss = 0.0100
Epoch 43: Train loss = 0.0064, Val loss = 0.0098
Epoch 44: Train loss = 0.0064, Val loss = 0.0095
Epoch 45: Train loss = 0.0064, Val loss = 0.0100
Epoch 46: Train loss = 0.0064, Val loss = 0.0102
Epoch 47: Train loss = 0.0064, Val loss = 0.0095
Epoch 48: Train loss = 0.0064, Val loss = 0.0096
Epoch 49: Train loss = 0.0064, Val loss = 0.0106
Epoch 50: Train loss = 0.0064, Val loss = 0.0099
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0658, Val loss = 0.0448
Epoch 2: Train loss = 0.0651, Val loss = 0.0441
Epoch 3: Train loss = 0.0643, Val loss = 0.0434
Epoch 4: Train loss = 0.0634, Val loss = 0.0427
Epoch 5: Train loss = 0.0627, Val loss = 0.0421
Epoch 6: Train loss = 0.0620, Val loss = 0.0416
Epoch 7: Train loss = 0.0614, Val loss = 0.0411
Epoch 8: Train loss = 0.0608, Val loss = 0.0406
Epoch 9: Train loss = 0.0602, Val loss = 0.0401
Epoch 10: Train loss = 0.0594, Val loss = 0.0395
Epoch 11: Train loss = 0.0589, Val loss = 0.0390
Epoch 12: Train loss = 0.0583, Val loss = 0.0384
Epoch 13: Train loss = 0.0577, Val loss = 0.0379
Epoch 14: Train loss = 0.0571, Val loss = 0.0373
Epoch 15: Train loss = 0.0564, Val loss = 0.0368
Epoch 16: Train loss = 0.0559, Val loss = 0.0362
Epoch 17: Train loss = 0.0551, Val loss = 0.0357
Epoch 18: Train loss = 0.0544, Val loss = 0.0351
Epoch 19: Train loss = 0.0539, Val loss = 0.0345
Epoch 20: Train loss = 0.0531, Val loss = 0.0339
Epoch 21: Train loss = 0.0523, Val loss = 0.0333
Epoch 22: Train loss = 0.0517, Val loss = 0.0327
Epoch 23: Train loss = 0.0508, Val loss = 0.0320
Epoch 24: Train loss = 0.0500, Val loss = 0.0313
Epoch 25: Train loss = 0.0492, Val loss = 0.0306
Epoch 26: Train loss = 0.0483, Val loss = 0.0299
Epoch 27: Train loss = 0.0475, Val loss = 0.0291
Epoch 28: Train loss = 0.0466, Val loss = 0.0283
Epoch 29: Train loss = 0.0455, Val loss = 0.0275
Epoch 30: Train loss = 0.0446, Val loss = 0.0266
Epoch 31: Train loss = 0.0435, Val loss = 0.0257
Epoch 32: Train loss = 0.0424, Val loss = 0.0248
Epoch 33: Train loss = 0.0412, Val loss = 0.0239
Epoch 34: Train loss = 0.0401, Val loss = 0.0230
Epoch 35: Train loss = 0.0388, Val loss = 0.0220
Epoch 36: Train loss = 0.0376, Val loss = 0.0211
Epoch 37: Train loss = 0.0361, Val loss = 0.0202
Epoch 38: Train loss = 0.0349, Val loss = 0.0193
Epoch 39: Train loss = 0.0334, Val loss = 0.0184
Epoch 40: Train loss = 0.0322, Val loss = 0.0176
Epoch 41: Train loss = 0.0308, Val loss = 0.0168
Epoch 42: Train loss = 0.0293, Val loss = 0.0160
Epoch 43: Train loss = 0.0280, Val loss = 0.0154
Epoch 44: Train loss = 0.0267, Val loss = 0.0147
Epoch 45: Train loss = 0.0254, Val loss = 0.0142
Epoch 46: Train loss = 0.0241, Val loss = 0.0137
Epoch 47: Train loss = 0.0229, Val loss = 0.0133
Epoch 48: Train loss = 0.0218, Val loss = 0.0130
Epoch 49: Train loss = 0.0207, Val loss = 0.0128
Epoch 50: Train loss = 0.0196, Val loss = 0.0127
Epoch 51: Train loss = 0.0187, Val loss = 0.0126
Epoch 52: Train loss = 0.0177, Val loss = 0.0125
Epoch 53: Train loss = 0.0169, Val loss = 0.0124
Epoch 54: Train loss = 0.0161, Val loss = 0.0123
Epoch 55: Train loss = 0.0154, Val loss = 0.0122
Epoch 56: Train loss = 0.0147, Val loss = 0.0120
Epoch 57: Train loss = 0.0142, Val loss = 0.0118
Epoch 58: Train loss = 0.0136, Val loss = 0.0116
Epoch 59: Train loss = 0.0131, Val loss = 0.0114
Epoch 60: Train loss = 0.0127, Val loss = 0.0112
Epoch 61: Train loss = 0.0122, Val loss = 0.0111
Epoch 62: Train loss = 0.0119, Val loss = 0.0110
Epoch 63: Train loss = 0.0114, Val loss = 0.0109
Epoch 64: Train loss = 0.0111, Val loss = 0.0107
Epoch 65: Train loss = 0.0108, Val loss = 0.0106
Epoch 66: Train loss = 0.0105, Val loss = 0.0105
Epoch 67: Train loss = 0.0102, Val loss = 0.0103
Epoch 68: Train loss = 0.0100, Val loss = 0.0102
Epoch 69: Train loss = 0.0097, Val loss = 0.0101
Epoch 70: Train loss = 0.0095, Val loss = 0.0100
Epoch 71: Train loss = 0.0093, Val loss = 0.0099
Epoch 72: Train loss = 0.0091, Val loss = 0.0099
Epoch 73: Train loss = 0.0089, Val loss = 0.0098
Epoch 74: Train loss = 0.0087, Val loss = 0.0097
Epoch 75: Train loss = 0.0086, Val loss = 0.0096
Epoch 76: Train loss = 0.0084, Val loss = 0.0096
Epoch 77: Train loss = 0.0083, Val loss = 0.0096
Epoch 78: Train loss = 0.0082, Val loss = 0.0095
Epoch 79: Train loss = 0.0080, Val loss = 0.0095
Epoch 80: Train loss = 0.0079, Val loss = 0.0095
Epoch 81: Train loss = 0.0078, Val loss = 0.0094
Epoch 82: Train loss = 0.0077, Val loss = 0.0094
Epoch 83: Train loss = 0.0076, Val loss = 0.0094
Epoch 84: Train loss = 0.0075, Val loss = 0.0094
Epoch 85: Train loss = 0.0075, Val loss = 0.0094
Epoch 86: Train loss = 0.0074, Val loss = 0.0093
Epoch 87: Train loss = 0.0073, Val loss = 0.0093
Epoch 88: Train loss = 0.0073, Val loss = 0.0093
Epoch 89: Train loss = 0.0072, Val loss = 0.0092
Epoch 90: Train loss = 0.0072, Val loss = 0.0093
Epoch 91: Train loss = 0.0071, Val loss = 0.0093
Epoch 92: Train loss = 0.0071, Val loss = 0.0093
Epoch 93: Train loss = 0.0070, Val loss = 0.0093
Epoch 94: Train loss = 0.0070, Val loss = 0.0093
Epoch 95: Train loss = 0.0069, Val loss = 0.0093
Epoch 96: Train loss = 0.0070, Val loss = 0.0092
Epoch 97: Train loss = 0.0069, Val loss = 0.0092
Epoch 98: Train loss = 0.0069, Val loss = 0.0092
Epoch 99: Train loss = 0.0069, Val loss = 0.0092
Epoch 100: Train loss = 0.0068, Val loss = 0.0093
Using device: mps
Epoch 1: Train loss = 0.0537, Val loss = 0.0332
Epoch 2: Train loss = 0.0494, Val loss = 0.0297
Epoch 3: Train loss = 0.0451, Val loss = 0.0262
Epoch 4: Train loss = 0.0405, Val loss = 0.0229
Epoch 5: Train loss = 0.0356, Val loss = 0.0203
Epoch 6: Train loss = 0.0304, Val loss = 0.0194
Epoch 7: Train loss = 0.0255, Val loss = 0.0198
Epoch 8: Train loss = 0.0213, Val loss = 0.0172
Epoch 9: Train loss = 0.0165, Val loss = 0.0132
Epoch 10: Train loss = 0.0125, Val loss = 0.0107
Epoch 11: Train loss = 0.0101, Val loss = 0.0103
Epoch 12: Train loss = 0.0089, Val loss = 0.0103
Epoch 13: Train loss = 0.0081, Val loss = 0.0101
Epoch 14: Train loss = 0.0077, Val loss = 0.0093
Epoch 15: Train loss = 0.0074, Val loss = 0.0089
Epoch 16: Train loss = 0.0071, Val loss = 0.0091
Epoch 17: Train loss = 0.0069, Val loss = 0.0090
Epoch 18: Train loss = 0.0068, Val loss = 0.0085
Epoch 19: Train loss = 0.0067, Val loss = 0.0083
Epoch 20: Train loss = 0.0067, Val loss = 0.0086
Epoch 21: Train loss = 0.0067, Val loss = 0.0090
Epoch 22: Train loss = 0.0066, Val loss = 0.0089
Epoch 23: Train loss = 0.0066, Val loss = 0.0086
Epoch 24: Train loss = 0.0066, Val loss = 0.0090
Epoch 25: Train loss = 0.0066, Val loss = 0.0091
Epoch 26: Train loss = 0.0065, Val loss = 0.0091
Epoch 27: Train loss = 0.0065, Val loss = 0.0092
Epoch 28: Train loss = 0.0065, Val loss = 0.0095
Epoch 29: Train loss = 0.0065, Val loss = 0.0092
Epoch 30: Train loss = 0.0065, Val loss = 0.0093
Epoch 31: Train loss = 0.0065, Val loss = 0.0095
Epoch 32: Train loss = 0.0065, Val loss = 0.0097
Epoch 33: Train loss = 0.0065, Val loss = 0.0095
Epoch 34: Train loss = 0.0065, Val loss = 0.0094
Epoch 35: Train loss = 0.0065, Val loss = 0.0096
Epoch 36: Train loss = 0.0065, Val loss = 0.0097
Epoch 37: Train loss = 0.0065, Val loss = 0.0097
Epoch 38: Train loss = 0.0064, Val loss = 0.0094
Epoch 39: Train loss = 0.0064, Val loss = 0.0096
Epoch 40: Train loss = 0.0064, Val loss = 0.0099
Epoch 41: Train loss = 0.0064, Val loss = 0.0095
Epoch 42: Train loss = 0.0064, Val loss = 0.0094
Epoch 43: Train loss = 0.0064, Val loss = 0.0097
Epoch 44: Train loss = 0.0064, Val loss = 0.0101
Epoch 45: Train loss = 0.0064, Val loss = 0.0094
Epoch 46: Train loss = 0.0064, Val loss = 0.0099
Epoch 47: Train loss = 0.0064, Val loss = 0.0100
Epoch 48: Train loss = 0.0064, Val loss = 0.0097
Epoch 49: Train loss = 0.0064, Val loss = 0.0096
Epoch 50: Train loss = 0.0063, Val loss = 0.0102
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0532, Val loss = 0.0318
Epoch 2: Train loss = 0.0491, Val loss = 0.0279
Epoch 3: Train loss = 0.0449, Val loss = 0.0241
Epoch 4: Train loss = 0.0409, Val loss = 0.0205
Epoch 5: Train loss = 0.0367, Val loss = 0.0170
Epoch 6: Train loss = 0.0324, Val loss = 0.0137
Epoch 7: Train loss = 0.0279, Val loss = 0.0109
Epoch 8: Train loss = 0.0235, Val loss = 0.0087
Epoch 9: Train loss = 0.0192, Val loss = 0.0077
Epoch 10: Train loss = 0.0156, Val loss = 0.0083
Epoch 11: Train loss = 0.0128, Val loss = 0.0101
Epoch 12: Train loss = 0.0110, Val loss = 0.0123
Epoch 13: Train loss = 0.0099, Val loss = 0.0139
Epoch 14: Train loss = 0.0091, Val loss = 0.0141
Epoch 15: Train loss = 0.0085, Val loss = 0.0134
Epoch 16: Train loss = 0.0079, Val loss = 0.0121
Epoch 17: Train loss = 0.0075, Val loss = 0.0108
Epoch 18: Train loss = 0.0072, Val loss = 0.0099
Epoch 19: Train loss = 0.0071, Val loss = 0.0093
Epoch 20: Train loss = 0.0071, Val loss = 0.0090
Epoch 21: Train loss = 0.0070, Val loss = 0.0091
Epoch 22: Train loss = 0.0070, Val loss = 0.0093
Epoch 23: Train loss = 0.0069, Val loss = 0.0096
Epoch 24: Train loss = 0.0068, Val loss = 0.0098
Epoch 25: Train loss = 0.0068, Val loss = 0.0099
Epoch 26: Train loss = 0.0067, Val loss = 0.0097
Epoch 27: Train loss = 0.0067, Val loss = 0.0096
Epoch 28: Train loss = 0.0067, Val loss = 0.0094
Epoch 29: Train loss = 0.0067, Val loss = 0.0094
Epoch 30: Train loss = 0.0066, Val loss = 0.0093
Epoch 31: Train loss = 0.0066, Val loss = 0.0094
Epoch 32: Train loss = 0.0065, Val loss = 0.0094
Epoch 33: Train loss = 0.0065, Val loss = 0.0094
Epoch 34: Train loss = 0.0065, Val loss = 0.0095
Epoch 35: Train loss = 0.0065, Val loss = 0.0095
Epoch 36: Train loss = 0.0065, Val loss = 0.0096
Epoch 37: Train loss = 0.0065, Val loss = 0.0096
Epoch 38: Train loss = 0.0065, Val loss = 0.0096
Epoch 39: Train loss = 0.0064, Val loss = 0.0096
Epoch 40: Train loss = 0.0064, Val loss = 0.0096
Epoch 41: Train loss = 0.0064, Val loss = 0.0097
Epoch 42: Train loss = 0.0064, Val loss = 0.0097
Epoch 43: Train loss = 0.0064, Val loss = 0.0098
Epoch 44: Train loss = 0.0064, Val loss = 0.0099
Epoch 45: Train loss = 0.0064, Val loss = 0.0099
Epoch 46: Train loss = 0.0064, Val loss = 0.0099
Epoch 47: Train loss = 0.0063, Val loss = 0.0099
Epoch 48: Train loss = 0.0063, Val loss = 0.0099
Epoch 49: Train loss = 0.0064, Val loss = 0.0100
Epoch 50: Train loss = 0.0063, Val loss = 0.0101
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0414, Val loss = 0.0089
Epoch 2: Train loss = 0.0152, Val loss = 0.0320
Epoch 3: Train loss = 0.0114, Val loss = 0.0106
Epoch 4: Train loss = 0.0085, Val loss = 0.0063
Epoch 5: Train loss = 0.0082, Val loss = 0.0117
Epoch 6: Train loss = 0.0075, Val loss = 0.0118
Epoch 7: Train loss = 0.0069, Val loss = 0.0092
Epoch 8: Train loss = 0.0069, Val loss = 0.0098
Epoch 9: Train loss = 0.0067, Val loss = 0.0105
Epoch 10: Train loss = 0.0066, Val loss = 0.0096
Epoch 11: Train loss = 0.0066, Val loss = 0.0091
Epoch 12: Train loss = 0.0065, Val loss = 0.0108
Epoch 13: Train loss = 0.0065, Val loss = 0.0102
Epoch 14: Train loss = 0.0064, Val loss = 0.0097
Epoch 15: Train loss = 0.0064, Val loss = 0.0104
Epoch 16: Train loss = 0.0064, Val loss = 0.0105
Epoch 17: Train loss = 0.0063, Val loss = 0.0099
Epoch 18: Train loss = 0.0063, Val loss = 0.0105
Epoch 19: Train loss = 0.0063, Val loss = 0.0100
Epoch 20: Train loss = 0.0062, Val loss = 0.0103
Epoch 21: Train loss = 0.0062, Val loss = 0.0100
Epoch 22: Train loss = 0.0062, Val loss = 0.0102
Epoch 23: Train loss = 0.0062, Val loss = 0.0104
Epoch 24: Train loss = 0.0062, Val loss = 0.0100
Epoch 25: Train loss = 0.0061, Val loss = 0.0106
Epoch 26: Train loss = 0.0061, Val loss = 0.0100
Epoch 27: Train loss = 0.0061, Val loss = 0.0100
Epoch 28: Train loss = 0.0060, Val loss = 0.0104
Epoch 29: Train loss = 0.0060, Val loss = 0.0099
Epoch 30: Train loss = 0.0060, Val loss = 0.0104
Epoch 31: Train loss = 0.0060, Val loss = 0.0099
Epoch 32: Train loss = 0.0059, Val loss = 0.0107
Epoch 33: Train loss = 0.0059, Val loss = 0.0101
Epoch 34: Train loss = 0.0059, Val loss = 0.0100
Epoch 35: Train loss = 0.0058, Val loss = 0.0094
Epoch 36: Train loss = 0.0058, Val loss = 0.0096
Epoch 37: Train loss = 0.0058, Val loss = 0.0103
Epoch 38: Train loss = 0.0058, Val loss = 0.0095
Epoch 39: Train loss = 0.0057, Val loss = 0.0093
Epoch 40: Train loss = 0.0057, Val loss = 0.0090
Epoch 41: Train loss = 0.0057, Val loss = 0.0091
Epoch 42: Train loss = 0.0056, Val loss = 0.0089
Epoch 43: Train loss = 0.0056, Val loss = 0.0088
Epoch 44: Train loss = 0.0056, Val loss = 0.0082
Epoch 45: Train loss = 0.0055, Val loss = 0.0077
Epoch 46: Train loss = 0.0055, Val loss = 0.0069
Epoch 47: Train loss = 0.0055, Val loss = 0.0068
Epoch 48: Train loss = 0.0055, Val loss = 0.0070
Epoch 49: Train loss = 0.0055, Val loss = 0.0072
Epoch 50: Train loss = 0.0054, Val loss = 0.0077
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0465, Val loss = 0.0263
Epoch 2: Train loss = 0.0422, Val loss = 0.0223
Epoch 3: Train loss = 0.0379, Val loss = 0.0186
Epoch 4: Train loss = 0.0335, Val loss = 0.0150
Epoch 5: Train loss = 0.0290, Val loss = 0.0117
Epoch 6: Train loss = 0.0246, Val loss = 0.0090
Epoch 7: Train loss = 0.0203, Val loss = 0.0073
Epoch 8: Train loss = 0.0164, Val loss = 0.0070
Epoch 9: Train loss = 0.0133, Val loss = 0.0082
Epoch 10: Train loss = 0.0111, Val loss = 0.0105
Epoch 11: Train loss = 0.0099, Val loss = 0.0128
Epoch 12: Train loss = 0.0092, Val loss = 0.0141
Epoch 13: Train loss = 0.0087, Val loss = 0.0139
Epoch 14: Train loss = 0.0081, Val loss = 0.0128
Epoch 15: Train loss = 0.0077, Val loss = 0.0113
Epoch 16: Train loss = 0.0074, Val loss = 0.0100
Epoch 17: Train loss = 0.0073, Val loss = 0.0091
Epoch 18: Train loss = 0.0072, Val loss = 0.0087
Epoch 19: Train loss = 0.0071, Val loss = 0.0086
Epoch 20: Train loss = 0.0071, Val loss = 0.0089
Epoch 21: Train loss = 0.0070, Val loss = 0.0093
Epoch 22: Train loss = 0.0069, Val loss = 0.0097
Epoch 23: Train loss = 0.0068, Val loss = 0.0099
Epoch 24: Train loss = 0.0068, Val loss = 0.0098
Epoch 25: Train loss = 0.0067, Val loss = 0.0097
Epoch 26: Train loss = 0.0067, Val loss = 0.0095
Epoch 27: Train loss = 0.0067, Val loss = 0.0094
Epoch 28: Train loss = 0.0066, Val loss = 0.0094
Epoch 29: Train loss = 0.0066, Val loss = 0.0094
Epoch 30: Train loss = 0.0066, Val loss = 0.0094
Epoch 31: Train loss = 0.0066, Val loss = 0.0095
Epoch 32: Train loss = 0.0066, Val loss = 0.0096
Epoch 33: Train loss = 0.0065, Val loss = 0.0097
Epoch 34: Train loss = 0.0065, Val loss = 0.0097
Epoch 35: Train loss = 0.0065, Val loss = 0.0096
Epoch 36: Train loss = 0.0065, Val loss = 0.0096
Epoch 37: Train loss = 0.0065, Val loss = 0.0097
Epoch 38: Train loss = 0.0065, Val loss = 0.0097
Epoch 39: Train loss = 0.0065, Val loss = 0.0097
Epoch 40: Train loss = 0.0064, Val loss = 0.0099
Epoch 41: Train loss = 0.0064, Val loss = 0.0099
Epoch 42: Train loss = 0.0064, Val loss = 0.0100
Epoch 43: Train loss = 0.0064, Val loss = 0.0100
Epoch 44: Train loss = 0.0064, Val loss = 0.0100
Epoch 45: Train loss = 0.0064, Val loss = 0.0100
Epoch 46: Train loss = 0.0064, Val loss = 0.0100
Epoch 47: Train loss = 0.0064, Val loss = 0.0100
Epoch 48: Train loss = 0.0064, Val loss = 0.0101
Epoch 49: Train loss = 0.0064, Val loss = 0.0101
Epoch 50: Train loss = 0.0064, Val loss = 0.0101
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0378, Val loss = 0.0121
Epoch 2: Train loss = 0.0155, Val loss = 0.0216
Epoch 3: Train loss = 0.0091, Val loss = 0.0085
Epoch 4: Train loss = 0.0091, Val loss = 0.0081
Epoch 5: Train loss = 0.0077, Val loss = 0.0121
Epoch 6: Train loss = 0.0072, Val loss = 0.0088
Epoch 7: Train loss = 0.0069, Val loss = 0.0076
Epoch 8: Train loss = 0.0068, Val loss = 0.0098
Epoch 9: Train loss = 0.0067, Val loss = 0.0096
Epoch 10: Train loss = 0.0065, Val loss = 0.0088
Epoch 11: Train loss = 0.0065, Val loss = 0.0101
Epoch 12: Train loss = 0.0065, Val loss = 0.0104
Epoch 13: Train loss = 0.0065, Val loss = 0.0101
Epoch 14: Train loss = 0.0064, Val loss = 0.0104
Epoch 15: Train loss = 0.0064, Val loss = 0.0104
Epoch 16: Train loss = 0.0063, Val loss = 0.0107
Epoch 17: Train loss = 0.0063, Val loss = 0.0102
Epoch 18: Train loss = 0.0063, Val loss = 0.0105
Epoch 19: Train loss = 0.0063, Val loss = 0.0109
Epoch 20: Train loss = 0.0062, Val loss = 0.0101
Epoch 21: Train loss = 0.0062, Val loss = 0.0105
Epoch 22: Train loss = 0.0062, Val loss = 0.0106
Epoch 23: Train loss = 0.0062, Val loss = 0.0101
Epoch 24: Train loss = 0.0062, Val loss = 0.0107
Epoch 25: Train loss = 0.0061, Val loss = 0.0102
Epoch 26: Train loss = 0.0061, Val loss = 0.0101
Epoch 27: Train loss = 0.0061, Val loss = 0.0106
Epoch 28: Train loss = 0.0060, Val loss = 0.0097
Epoch 29: Train loss = 0.0061, Val loss = 0.0110
Epoch 30: Train loss = 0.0060, Val loss = 0.0101
Epoch 31: Train loss = 0.0060, Val loss = 0.0102
Epoch 32: Train loss = 0.0060, Val loss = 0.0104
Epoch 33: Train loss = 0.0059, Val loss = 0.0105
Epoch 34: Train loss = 0.0059, Val loss = 0.0098
Epoch 35: Train loss = 0.0059, Val loss = 0.0110
Epoch 36: Train loss = 0.0059, Val loss = 0.0097
Epoch 37: Train loss = 0.0059, Val loss = 0.0106
Epoch 38: Train loss = 0.0059, Val loss = 0.0100
Epoch 39: Train loss = 0.0058, Val loss = 0.0092
Epoch 40: Train loss = 0.0058, Val loss = 0.0104
Epoch 41: Train loss = 0.0058, Val loss = 0.0097
Epoch 42: Train loss = 0.0058, Val loss = 0.0095
Epoch 43: Train loss = 0.0057, Val loss = 0.0093
Epoch 44: Train loss = 0.0057, Val loss = 0.0095
Epoch 45: Train loss = 0.0057, Val loss = 0.0084
Epoch 46: Train loss = 0.0057, Val loss = 0.0079
Epoch 47: Train loss = 0.0056, Val loss = 0.0101
Epoch 48: Train loss = 0.0057, Val loss = 0.0098
Epoch 49: Train loss = 0.0056, Val loss = 0.0079
Epoch 50: Train loss = 0.0056, Val loss = 0.0075
Epoch 51: Train loss = 0.0056, Val loss = 0.0077
Epoch 52: Train loss = 0.0055, Val loss = 0.0094
Epoch 53: Train loss = 0.0055, Val loss = 0.0077
Epoch 54: Train loss = 0.0054, Val loss = 0.0074
Epoch 55: Train loss = 0.0054, Val loss = 0.0087
Epoch 56: Train loss = 0.0054, Val loss = 0.0086
Epoch 57: Train loss = 0.0054, Val loss = 0.0083
Epoch 58: Train loss = 0.0053, Val loss = 0.0072
Epoch 59: Train loss = 0.0053, Val loss = 0.0060
Epoch 60: Train loss = 0.0053, Val loss = 0.0056
Epoch 61: Train loss = 0.0054, Val loss = 0.0055
Epoch 62: Train loss = 0.0056, Val loss = 0.0076
Epoch 63: Train loss = 0.0054, Val loss = 0.0086
Epoch 64: Train loss = 0.0053, Val loss = 0.0067
Epoch 65: Train loss = 0.0053, Val loss = 0.0052
Epoch 66: Train loss = 0.0053, Val loss = 0.0065
Epoch 67: Train loss = 0.0052, Val loss = 0.0083
Epoch 68: Train loss = 0.0052, Val loss = 0.0068
Epoch 69: Train loss = 0.0051, Val loss = 0.0057
Epoch 70: Train loss = 0.0051, Val loss = 0.0061
Epoch 71: Train loss = 0.0050, Val loss = 0.0063
Epoch 72: Train loss = 0.0050, Val loss = 0.0061
Epoch 73: Train loss = 0.0050, Val loss = 0.0064
Epoch 74: Train loss = 0.0049, Val loss = 0.0063
Epoch 75: Train loss = 0.0049, Val loss = 0.0064
Early stopping triggered at epoch 75.
Using device: mps
Epoch 1: Train loss = 0.0569, Val loss = 0.0366
Epoch 2: Train loss = 0.0554, Val loss = 0.0353
Epoch 3: Train loss = 0.0540, Val loss = 0.0341
Epoch 4: Train loss = 0.0525, Val loss = 0.0326
Epoch 5: Train loss = 0.0509, Val loss = 0.0310
Epoch 6: Train loss = 0.0489, Val loss = 0.0292
Epoch 7: Train loss = 0.0467, Val loss = 0.0270
Epoch 8: Train loss = 0.0442, Val loss = 0.0245
Epoch 9: Train loss = 0.0412, Val loss = 0.0217
Epoch 10: Train loss = 0.0378, Val loss = 0.0187
Epoch 11: Train loss = 0.0341, Val loss = 0.0154
Epoch 12: Train loss = 0.0299, Val loss = 0.0121
Epoch 13: Train loss = 0.0252, Val loss = 0.0093
Epoch 14: Train loss = 0.0205, Val loss = 0.0076
Epoch 15: Train loss = 0.0162, Val loss = 0.0079
Epoch 16: Train loss = 0.0129, Val loss = 0.0105
Epoch 17: Train loss = 0.0110, Val loss = 0.0142
Epoch 18: Train loss = 0.0103, Val loss = 0.0162
Epoch 19: Train loss = 0.0096, Val loss = 0.0154
Epoch 20: Train loss = 0.0086, Val loss = 0.0129
Epoch 21: Train loss = 0.0078, Val loss = 0.0104
Epoch 22: Train loss = 0.0074, Val loss = 0.0087
Epoch 23: Train loss = 0.0073, Val loss = 0.0079
Epoch 24: Train loss = 0.0072, Val loss = 0.0079
Epoch 25: Train loss = 0.0072, Val loss = 0.0085
Epoch 26: Train loss = 0.0071, Val loss = 0.0092
Epoch 27: Train loss = 0.0070, Val loss = 0.0099
Epoch 28: Train loss = 0.0069, Val loss = 0.0102
Epoch 29: Train loss = 0.0069, Val loss = 0.0100
Epoch 30: Train loss = 0.0069, Val loss = 0.0096
Epoch 31: Train loss = 0.0068, Val loss = 0.0092
Epoch 32: Train loss = 0.0068, Val loss = 0.0089
Epoch 33: Train loss = 0.0068, Val loss = 0.0089
Epoch 34: Train loss = 0.0067, Val loss = 0.0091
Epoch 35: Train loss = 0.0067, Val loss = 0.0093
Epoch 36: Train loss = 0.0066, Val loss = 0.0096
Epoch 37: Train loss = 0.0066, Val loss = 0.0096
Epoch 38: Train loss = 0.0066, Val loss = 0.0095
Epoch 39: Train loss = 0.0066, Val loss = 0.0093
Epoch 40: Train loss = 0.0066, Val loss = 0.0092
Epoch 41: Train loss = 0.0066, Val loss = 0.0091
Epoch 42: Train loss = 0.0066, Val loss = 0.0092
Epoch 43: Train loss = 0.0065, Val loss = 0.0093
Epoch 44: Train loss = 0.0065, Val loss = 0.0095
Epoch 45: Train loss = 0.0065, Val loss = 0.0095
Epoch 46: Train loss = 0.0065, Val loss = 0.0095
Epoch 47: Train loss = 0.0065, Val loss = 0.0093
Epoch 48: Train loss = 0.0065, Val loss = 0.0093
Epoch 49: Train loss = 0.0065, Val loss = 0.0094
Epoch 50: Train loss = 0.0065, Val loss = 0.0094
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0441, Val loss = 0.0200
Epoch 2: Train loss = 0.0302, Val loss = 0.0086
Epoch 3: Train loss = 0.0154, Val loss = 0.0256
Epoch 4: Train loss = 0.0100, Val loss = 0.0065
Epoch 5: Train loss = 0.0082, Val loss = 0.0057
Epoch 6: Train loss = 0.0083, Val loss = 0.0104
Epoch 7: Train loss = 0.0077, Val loss = 0.0118
Epoch 8: Train loss = 0.0071, Val loss = 0.0072
Epoch 9: Train loss = 0.0069, Val loss = 0.0070
Epoch 10: Train loss = 0.0067, Val loss = 0.0102
Epoch 11: Train loss = 0.0067, Val loss = 0.0096
Epoch 12: Train loss = 0.0067, Val loss = 0.0074
Epoch 13: Train loss = 0.0066, Val loss = 0.0092
Epoch 14: Train loss = 0.0065, Val loss = 0.0105
Epoch 15: Train loss = 0.0065, Val loss = 0.0090
Epoch 16: Train loss = 0.0065, Val loss = 0.0089
Epoch 17: Train loss = 0.0065, Val loss = 0.0103
Epoch 18: Train loss = 0.0064, Val loss = 0.0101
Epoch 19: Train loss = 0.0064, Val loss = 0.0093
Epoch 20: Train loss = 0.0064, Val loss = 0.0108
Epoch 21: Train loss = 0.0064, Val loss = 0.0102
Epoch 22: Train loss = 0.0064, Val loss = 0.0098
Epoch 23: Train loss = 0.0064, Val loss = 0.0106
Epoch 24: Train loss = 0.0064, Val loss = 0.0105
Epoch 25: Train loss = 0.0063, Val loss = 0.0100
Epoch 26: Train loss = 0.0063, Val loss = 0.0103
Epoch 27: Train loss = 0.0063, Val loss = 0.0105
Epoch 28: Train loss = 0.0063, Val loss = 0.0099
Epoch 29: Train loss = 0.0063, Val loss = 0.0105
Epoch 30: Train loss = 0.0063, Val loss = 0.0100
Epoch 31: Train loss = 0.0063, Val loss = 0.0100
Epoch 32: Train loss = 0.0063, Val loss = 0.0101
Epoch 33: Train loss = 0.0062, Val loss = 0.0097
Epoch 34: Train loss = 0.0062, Val loss = 0.0102
Epoch 35: Train loss = 0.0062, Val loss = 0.0104
Epoch 36: Train loss = 0.0062, Val loss = 0.0094
Epoch 37: Train loss = 0.0062, Val loss = 0.0102
Epoch 38: Train loss = 0.0061, Val loss = 0.0094
Epoch 39: Train loss = 0.0061, Val loss = 0.0104
Epoch 40: Train loss = 0.0061, Val loss = 0.0092
Epoch 41: Train loss = 0.0061, Val loss = 0.0104
Epoch 42: Train loss = 0.0061, Val loss = 0.0093
Epoch 43: Train loss = 0.0060, Val loss = 0.0098
Epoch 44: Train loss = 0.0060, Val loss = 0.0100
Epoch 45: Train loss = 0.0060, Val loss = 0.0083
Epoch 46: Train loss = 0.0060, Val loss = 0.0122
Epoch 47: Train loss = 0.0060, Val loss = 0.0075
Epoch 48: Train loss = 0.0060, Val loss = 0.0124
Epoch 49: Train loss = 0.0059, Val loss = 0.0082
Epoch 50: Train loss = 0.0059, Val loss = 0.0100
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0498, Val loss = 0.0308
Epoch 2: Train loss = 0.0487, Val loss = 0.0296
Epoch 3: Train loss = 0.0473, Val loss = 0.0284
Epoch 4: Train loss = 0.0459, Val loss = 0.0271
Epoch 5: Train loss = 0.0442, Val loss = 0.0256
Epoch 6: Train loss = 0.0425, Val loss = 0.0240
Epoch 7: Train loss = 0.0405, Val loss = 0.0220
Epoch 8: Train loss = 0.0383, Val loss = 0.0198
Epoch 9: Train loss = 0.0355, Val loss = 0.0174
Epoch 10: Train loss = 0.0324, Val loss = 0.0147
Epoch 11: Train loss = 0.0288, Val loss = 0.0119
Epoch 12: Train loss = 0.0249, Val loss = 0.0093
Epoch 13: Train loss = 0.0209, Val loss = 0.0076
Epoch 14: Train loss = 0.0169, Val loss = 0.0071
Epoch 15: Train loss = 0.0137, Val loss = 0.0087
Epoch 16: Train loss = 0.0114, Val loss = 0.0118
Epoch 17: Train loss = 0.0104, Val loss = 0.0147
Epoch 18: Train loss = 0.0097, Val loss = 0.0154
Epoch 19: Train loss = 0.0090, Val loss = 0.0138
Epoch 20: Train loss = 0.0081, Val loss = 0.0113
Epoch 21: Train loss = 0.0075, Val loss = 0.0091
Epoch 22: Train loss = 0.0073, Val loss = 0.0079
Epoch 23: Train loss = 0.0073, Val loss = 0.0076
Epoch 24: Train loss = 0.0072, Val loss = 0.0079
Epoch 25: Train loss = 0.0071, Val loss = 0.0086
Epoch 26: Train loss = 0.0071, Val loss = 0.0093
Epoch 27: Train loss = 0.0070, Val loss = 0.0098
Epoch 28: Train loss = 0.0070, Val loss = 0.0099
Epoch 29: Train loss = 0.0069, Val loss = 0.0096
Epoch 30: Train loss = 0.0068, Val loss = 0.0091
Epoch 31: Train loss = 0.0068, Val loss = 0.0087
Epoch 32: Train loss = 0.0068, Val loss = 0.0085
Epoch 33: Train loss = 0.0067, Val loss = 0.0086
Epoch 34: Train loss = 0.0067, Val loss = 0.0089
Epoch 35: Train loss = 0.0067, Val loss = 0.0092
Epoch 36: Train loss = 0.0066, Val loss = 0.0093
Epoch 37: Train loss = 0.0066, Val loss = 0.0091
Epoch 38: Train loss = 0.0066, Val loss = 0.0089
Epoch 39: Train loss = 0.0066, Val loss = 0.0087
Epoch 40: Train loss = 0.0066, Val loss = 0.0088
Epoch 41: Train loss = 0.0065, Val loss = 0.0090
Epoch 42: Train loss = 0.0065, Val loss = 0.0091
Epoch 43: Train loss = 0.0065, Val loss = 0.0091
Epoch 44: Train loss = 0.0065, Val loss = 0.0090
Epoch 45: Train loss = 0.0065, Val loss = 0.0090
Epoch 46: Train loss = 0.0065, Val loss = 0.0090
Epoch 47: Train loss = 0.0065, Val loss = 0.0090
Epoch 48: Train loss = 0.0065, Val loss = 0.0091
Epoch 49: Train loss = 0.0065, Val loss = 0.0092
Epoch 50: Train loss = 0.0064, Val loss = 0.0092
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0440, Val loss = 0.0204
Epoch 2: Train loss = 0.0305, Val loss = 0.0120
Epoch 3: Train loss = 0.0164, Val loss = 0.0203
Epoch 4: Train loss = 0.0095, Val loss = 0.0068
Epoch 5: Train loss = 0.0085, Val loss = 0.0071
Epoch 6: Train loss = 0.0080, Val loss = 0.0115
Epoch 7: Train loss = 0.0075, Val loss = 0.0090
Epoch 8: Train loss = 0.0069, Val loss = 0.0067
Epoch 9: Train loss = 0.0068, Val loss = 0.0094
Epoch 10: Train loss = 0.0067, Val loss = 0.0097
Epoch 11: Train loss = 0.0066, Val loss = 0.0076
Epoch 12: Train loss = 0.0066, Val loss = 0.0101
Epoch 13: Train loss = 0.0066, Val loss = 0.0096
Epoch 14: Train loss = 0.0065, Val loss = 0.0095
Epoch 15: Train loss = 0.0064, Val loss = 0.0098
Epoch 16: Train loss = 0.0064, Val loss = 0.0103
Epoch 17: Train loss = 0.0064, Val loss = 0.0100
Epoch 18: Train loss = 0.0064, Val loss = 0.0107
Epoch 19: Train loss = 0.0064, Val loss = 0.0104
Epoch 20: Train loss = 0.0064, Val loss = 0.0099
Epoch 21: Train loss = 0.0063, Val loss = 0.0112
Epoch 22: Train loss = 0.0063, Val loss = 0.0101
Epoch 23: Train loss = 0.0063, Val loss = 0.0108
Epoch 24: Train loss = 0.0063, Val loss = 0.0111
Epoch 25: Train loss = 0.0063, Val loss = 0.0106
Epoch 26: Train loss = 0.0063, Val loss = 0.0103
Epoch 27: Train loss = 0.0062, Val loss = 0.0111
Epoch 28: Train loss = 0.0062, Val loss = 0.0103
Epoch 29: Train loss = 0.0062, Val loss = 0.0108
Epoch 30: Train loss = 0.0062, Val loss = 0.0106
Epoch 31: Train loss = 0.0061, Val loss = 0.0113
Epoch 32: Train loss = 0.0061, Val loss = 0.0095
Epoch 33: Train loss = 0.0061, Val loss = 0.0113
Epoch 34: Train loss = 0.0061, Val loss = 0.0097
Epoch 35: Train loss = 0.0061, Val loss = 0.0108
Epoch 36: Train loss = 0.0060, Val loss = 0.0089
Epoch 37: Train loss = 0.0061, Val loss = 0.0110
Epoch 38: Train loss = 0.0060, Val loss = 0.0082
Epoch 39: Train loss = 0.0060, Val loss = 0.0105
Epoch 40: Train loss = 0.0060, Val loss = 0.0095
Epoch 41: Train loss = 0.0059, Val loss = 0.0091
Epoch 42: Train loss = 0.0059, Val loss = 0.0109
Epoch 43: Train loss = 0.0059, Val loss = 0.0088
Epoch 44: Train loss = 0.0059, Val loss = 0.0089
Epoch 45: Train loss = 0.0058, Val loss = 0.0104
Epoch 46: Train loss = 0.0058, Val loss = 0.0085
Epoch 47: Train loss = 0.0058, Val loss = 0.0079
Epoch 48: Train loss = 0.0058, Val loss = 0.0097
Epoch 49: Train loss = 0.0058, Val loss = 0.0089
Epoch 50: Train loss = 0.0057, Val loss = 0.0063
Epoch 51: Train loss = 0.0058, Val loss = 0.0084
Epoch 52: Train loss = 0.0057, Val loss = 0.0096
Epoch 53: Train loss = 0.0057, Val loss = 0.0067
Epoch 54: Train loss = 0.0056, Val loss = 0.0058
Epoch 55: Train loss = 0.0058, Val loss = 0.0071
Epoch 56: Train loss = 0.0057, Val loss = 0.0098
Epoch 57: Train loss = 0.0057, Val loss = 0.0065
Epoch 58: Train loss = 0.0056, Val loss = 0.0057
Epoch 59: Train loss = 0.0056, Val loss = 0.0096
Epoch 60: Train loss = 0.0057, Val loss = 0.0072
Epoch 61: Train loss = 0.0056, Val loss = 0.0056
Epoch 62: Train loss = 0.0056, Val loss = 0.0075
Epoch 63: Train loss = 0.0055, Val loss = 0.0070
Epoch 64: Train loss = 0.0054, Val loss = 0.0061
Epoch 65: Train loss = 0.0054, Val loss = 0.0063
Epoch 66: Train loss = 0.0054, Val loss = 0.0074
Epoch 67: Train loss = 0.0053, Val loss = 0.0060
Epoch 68: Train loss = 0.0053, Val loss = 0.0060
Epoch 69: Train loss = 0.0053, Val loss = 0.0063
Epoch 70: Train loss = 0.0053, Val loss = 0.0067
Epoch 71: Train loss = 0.0053, Val loss = 0.0073
Early stopping triggered at epoch 71.
Using device: mps
Epoch 1: Train loss = 0.0567, Val loss = 0.0377
Epoch 2: Train loss = 0.0551, Val loss = 0.0360
Epoch 3: Train loss = 0.0537, Val loss = 0.0345
Epoch 4: Train loss = 0.0523, Val loss = 0.0332
Epoch 5: Train loss = 0.0509, Val loss = 0.0319
Epoch 6: Train loss = 0.0496, Val loss = 0.0307
Epoch 7: Train loss = 0.0484, Val loss = 0.0294
Epoch 8: Train loss = 0.0468, Val loss = 0.0283
Epoch 9: Train loss = 0.0453, Val loss = 0.0272
Epoch 10: Train loss = 0.0442, Val loss = 0.0263
Epoch 11: Train loss = 0.0429, Val loss = 0.0254
Epoch 12: Train loss = 0.0417, Val loss = 0.0247
Epoch 13: Train loss = 0.0407, Val loss = 0.0240
Epoch 14: Train loss = 0.0396, Val loss = 0.0233
Epoch 15: Train loss = 0.0386, Val loss = 0.0226
Epoch 16: Train loss = 0.0377, Val loss = 0.0219
Epoch 17: Train loss = 0.0367, Val loss = 0.0212
Epoch 18: Train loss = 0.0356, Val loss = 0.0205
Epoch 19: Train loss = 0.0347, Val loss = 0.0197
Epoch 20: Train loss = 0.0339, Val loss = 0.0189
Epoch 21: Train loss = 0.0331, Val loss = 0.0182
Epoch 22: Train loss = 0.0320, Val loss = 0.0174
Epoch 23: Train loss = 0.0310, Val loss = 0.0167
Epoch 24: Train loss = 0.0302, Val loss = 0.0160
Epoch 25: Train loss = 0.0296, Val loss = 0.0153
Epoch 26: Train loss = 0.0286, Val loss = 0.0148
Epoch 27: Train loss = 0.0277, Val loss = 0.0142
Epoch 28: Train loss = 0.0271, Val loss = 0.0136
Epoch 29: Train loss = 0.0263, Val loss = 0.0131
Epoch 30: Train loss = 0.0256, Val loss = 0.0126
Epoch 31: Train loss = 0.0249, Val loss = 0.0121
Epoch 32: Train loss = 0.0242, Val loss = 0.0116
Epoch 33: Train loss = 0.0236, Val loss = 0.0111
Epoch 34: Train loss = 0.0229, Val loss = 0.0107
Epoch 35: Train loss = 0.0226, Val loss = 0.0102
Epoch 36: Train loss = 0.0219, Val loss = 0.0098
Epoch 37: Train loss = 0.0211, Val loss = 0.0095
Epoch 38: Train loss = 0.0208, Val loss = 0.0092
Epoch 39: Train loss = 0.0204, Val loss = 0.0089
Epoch 40: Train loss = 0.0202, Val loss = 0.0087
Epoch 41: Train loss = 0.0198, Val loss = 0.0084
Epoch 42: Train loss = 0.0192, Val loss = 0.0082
Epoch 43: Train loss = 0.0190, Val loss = 0.0080
Epoch 44: Train loss = 0.0185, Val loss = 0.0079
Epoch 45: Train loss = 0.0185, Val loss = 0.0077
Epoch 46: Train loss = 0.0178, Val loss = 0.0075
Epoch 47: Train loss = 0.0174, Val loss = 0.0073
Epoch 48: Train loss = 0.0172, Val loss = 0.0072
Epoch 49: Train loss = 0.0172, Val loss = 0.0070
Epoch 50: Train loss = 0.0168, Val loss = 0.0069
Epoch 51: Train loss = 0.0165, Val loss = 0.0068
Epoch 52: Train loss = 0.0163, Val loss = 0.0067
Epoch 53: Train loss = 0.0162, Val loss = 0.0066
Epoch 54: Train loss = 0.0159, Val loss = 0.0065
Epoch 55: Train loss = 0.0158, Val loss = 0.0064
Epoch 56: Train loss = 0.0157, Val loss = 0.0062
Epoch 57: Train loss = 0.0152, Val loss = 0.0061
Epoch 58: Train loss = 0.0154, Val loss = 0.0061
Epoch 59: Train loss = 0.0152, Val loss = 0.0060
Epoch 60: Train loss = 0.0151, Val loss = 0.0060
Epoch 61: Train loss = 0.0145, Val loss = 0.0060
Epoch 62: Train loss = 0.0145, Val loss = 0.0060
Epoch 63: Train loss = 0.0144, Val loss = 0.0060
Epoch 64: Train loss = 0.0144, Val loss = 0.0060
Epoch 65: Train loss = 0.0143, Val loss = 0.0059
Epoch 66: Train loss = 0.0137, Val loss = 0.0059
Epoch 67: Train loss = 0.0138, Val loss = 0.0058
Epoch 68: Train loss = 0.0137, Val loss = 0.0058
Epoch 69: Train loss = 0.0137, Val loss = 0.0059
Epoch 70: Train loss = 0.0136, Val loss = 0.0059
Epoch 71: Train loss = 0.0136, Val loss = 0.0059
Epoch 72: Train loss = 0.0136, Val loss = 0.0059
Epoch 73: Train loss = 0.0134, Val loss = 0.0059
Epoch 74: Train loss = 0.0129, Val loss = 0.0058
Epoch 75: Train loss = 0.0130, Val loss = 0.0057
Epoch 76: Train loss = 0.0132, Val loss = 0.0057
Epoch 77: Train loss = 0.0130, Val loss = 0.0058
Epoch 78: Train loss = 0.0128, Val loss = 0.0058
Epoch 79: Train loss = 0.0125, Val loss = 0.0059
Epoch 80: Train loss = 0.0125, Val loss = 0.0059
Epoch 81: Train loss = 0.0127, Val loss = 0.0059
Epoch 82: Train loss = 0.0123, Val loss = 0.0059
Epoch 83: Train loss = 0.0123, Val loss = 0.0058
Epoch 84: Train loss = 0.0122, Val loss = 0.0058
Epoch 85: Train loss = 0.0123, Val loss = 0.0058
Early stopping triggered at epoch 85.
Using device: mps
Epoch 1: Train loss = 0.0679, Val loss = 0.0392
Epoch 2: Train loss = 0.0570, Val loss = 0.0305
Epoch 3: Train loss = 0.0466, Val loss = 0.0222
Epoch 4: Train loss = 0.0375, Val loss = 0.0180
Epoch 5: Train loss = 0.0315, Val loss = 0.0154
Epoch 6: Train loss = 0.0265, Val loss = 0.0112
Epoch 7: Train loss = 0.0221, Val loss = 0.0078
Epoch 8: Train loss = 0.0195, Val loss = 0.0059
Epoch 9: Train loss = 0.0172, Val loss = 0.0052
Epoch 10: Train loss = 0.0156, Val loss = 0.0053
Epoch 11: Train loss = 0.0144, Val loss = 0.0060
Epoch 12: Train loss = 0.0132, Val loss = 0.0060
Epoch 13: Train loss = 0.0127, Val loss = 0.0054
Epoch 14: Train loss = 0.0123, Val loss = 0.0052
Epoch 15: Train loss = 0.0121, Val loss = 0.0057
Epoch 16: Train loss = 0.0115, Val loss = 0.0063
Epoch 17: Train loss = 0.0111, Val loss = 0.0060
Epoch 18: Train loss = 0.0107, Val loss = 0.0059
Epoch 19: Train loss = 0.0106, Val loss = 0.0065
Epoch 20: Train loss = 0.0104, Val loss = 0.0063
Epoch 21: Train loss = 0.0100, Val loss = 0.0061
Epoch 22: Train loss = 0.0100, Val loss = 0.0065
Epoch 23: Train loss = 0.0098, Val loss = 0.0064
Epoch 24: Train loss = 0.0096, Val loss = 0.0061
Epoch 25: Train loss = 0.0094, Val loss = 0.0064
Epoch 26: Train loss = 0.0096, Val loss = 0.0063
Epoch 27: Train loss = 0.0092, Val loss = 0.0066
Epoch 28: Train loss = 0.0092, Val loss = 0.0065
Epoch 29: Train loss = 0.0091, Val loss = 0.0063
Epoch 30: Train loss = 0.0090, Val loss = 0.0072
Epoch 31: Train loss = 0.0089, Val loss = 0.0060
Epoch 32: Train loss = 0.0087, Val loss = 0.0069
Epoch 33: Train loss = 0.0089, Val loss = 0.0060
Epoch 34: Train loss = 0.0085, Val loss = 0.0064
Epoch 35: Train loss = 0.0084, Val loss = 0.0062
Epoch 36: Train loss = 0.0085, Val loss = 0.0067
Epoch 37: Train loss = 0.0083, Val loss = 0.0062
Epoch 38: Train loss = 0.0082, Val loss = 0.0070
Epoch 39: Train loss = 0.0081, Val loss = 0.0065
Epoch 40: Train loss = 0.0082, Val loss = 0.0063
Epoch 41: Train loss = 0.0080, Val loss = 0.0069
Epoch 42: Train loss = 0.0079, Val loss = 0.0064
Epoch 43: Train loss = 0.0078, Val loss = 0.0070
Epoch 44: Train loss = 0.0079, Val loss = 0.0061
Epoch 45: Train loss = 0.0077, Val loss = 0.0057
Epoch 46: Train loss = 0.0078, Val loss = 0.0065
Epoch 47: Train loss = 0.0076, Val loss = 0.0050
Epoch 48: Train loss = 0.0076, Val loss = 0.0067
Epoch 49: Train loss = 0.0077, Val loss = 0.0075
Epoch 50: Train loss = 0.0075, Val loss = 0.0051
Epoch 51: Train loss = 0.0074, Val loss = 0.0059
Epoch 52: Train loss = 0.0074, Val loss = 0.0065
Epoch 53: Train loss = 0.0074, Val loss = 0.0061
Epoch 54: Train loss = 0.0072, Val loss = 0.0047
Epoch 55: Train loss = 0.0073, Val loss = 0.0058
Epoch 56: Train loss = 0.0073, Val loss = 0.0073
Epoch 57: Train loss = 0.0073, Val loss = 0.0049
Epoch 58: Train loss = 0.0073, Val loss = 0.0053
Epoch 59: Train loss = 0.0071, Val loss = 0.0055
Epoch 60: Train loss = 0.0071, Val loss = 0.0050
Epoch 61: Train loss = 0.0070, Val loss = 0.0054
Epoch 62: Train loss = 0.0071, Val loss = 0.0067
Epoch 63: Train loss = 0.0070, Val loss = 0.0045
Epoch 64: Train loss = 0.0070, Val loss = 0.0041
Epoch 65: Train loss = 0.0070, Val loss = 0.0059
Epoch 66: Train loss = 0.0070, Val loss = 0.0062
Epoch 67: Train loss = 0.0070, Val loss = 0.0040
Epoch 68: Train loss = 0.0069, Val loss = 0.0044
Epoch 69: Train loss = 0.0069, Val loss = 0.0075
Epoch 70: Train loss = 0.0067, Val loss = 0.0045
Epoch 71: Train loss = 0.0068, Val loss = 0.0040
Epoch 72: Train loss = 0.0069, Val loss = 0.0065
Epoch 73: Train loss = 0.0068, Val loss = 0.0047
Epoch 74: Train loss = 0.0067, Val loss = 0.0050
Epoch 75: Train loss = 0.0067, Val loss = 0.0049
Epoch 76: Train loss = 0.0067, Val loss = 0.0055
Epoch 77: Train loss = 0.0067, Val loss = 0.0054
Early stopping triggered at epoch 77.
Using device: mps
Epoch 1: Train loss = 0.0470, Val loss = 0.0291
Epoch 2: Train loss = 0.0452, Val loss = 0.0272
Epoch 3: Train loss = 0.0433, Val loss = 0.0253
Epoch 4: Train loss = 0.0413, Val loss = 0.0235
Epoch 5: Train loss = 0.0393, Val loss = 0.0219
Epoch 6: Train loss = 0.0377, Val loss = 0.0205
Epoch 7: Train loss = 0.0360, Val loss = 0.0193
Epoch 8: Train loss = 0.0344, Val loss = 0.0182
Epoch 9: Train loss = 0.0330, Val loss = 0.0172
Epoch 10: Train loss = 0.0318, Val loss = 0.0164
Epoch 11: Train loss = 0.0305, Val loss = 0.0157
Epoch 12: Train loss = 0.0295, Val loss = 0.0151
Epoch 13: Train loss = 0.0285, Val loss = 0.0146
Epoch 14: Train loss = 0.0275, Val loss = 0.0142
Epoch 15: Train loss = 0.0267, Val loss = 0.0138
Epoch 16: Train loss = 0.0262, Val loss = 0.0135
Epoch 17: Train loss = 0.0253, Val loss = 0.0131
Epoch 18: Train loss = 0.0246, Val loss = 0.0128
Epoch 19: Train loss = 0.0238, Val loss = 0.0124
Epoch 20: Train loss = 0.0233, Val loss = 0.0120
Epoch 21: Train loss = 0.0229, Val loss = 0.0116
Epoch 22: Train loss = 0.0222, Val loss = 0.0113
Epoch 23: Train loss = 0.0218, Val loss = 0.0110
Epoch 24: Train loss = 0.0213, Val loss = 0.0108
Epoch 25: Train loss = 0.0207, Val loss = 0.0107
Epoch 26: Train loss = 0.0202, Val loss = 0.0105
Epoch 27: Train loss = 0.0198, Val loss = 0.0103
Epoch 28: Train loss = 0.0192, Val loss = 0.0102
Epoch 29: Train loss = 0.0189, Val loss = 0.0100
Epoch 30: Train loss = 0.0188, Val loss = 0.0098
Epoch 31: Train loss = 0.0182, Val loss = 0.0096
Epoch 32: Train loss = 0.0182, Val loss = 0.0094
Epoch 33: Train loss = 0.0178, Val loss = 0.0092
Epoch 34: Train loss = 0.0175, Val loss = 0.0090
Epoch 35: Train loss = 0.0172, Val loss = 0.0089
Epoch 36: Train loss = 0.0169, Val loss = 0.0087
Epoch 37: Train loss = 0.0166, Val loss = 0.0085
Epoch 38: Train loss = 0.0162, Val loss = 0.0084
Epoch 39: Train loss = 0.0160, Val loss = 0.0082
Epoch 40: Train loss = 0.0157, Val loss = 0.0081
Epoch 41: Train loss = 0.0155, Val loss = 0.0080
Epoch 42: Train loss = 0.0153, Val loss = 0.0078
Epoch 43: Train loss = 0.0151, Val loss = 0.0077
Epoch 44: Train loss = 0.0148, Val loss = 0.0076
Epoch 45: Train loss = 0.0148, Val loss = 0.0075
Epoch 46: Train loss = 0.0145, Val loss = 0.0074
Epoch 47: Train loss = 0.0143, Val loss = 0.0073
Epoch 48: Train loss = 0.0143, Val loss = 0.0072
Epoch 49: Train loss = 0.0140, Val loss = 0.0071
Epoch 50: Train loss = 0.0139, Val loss = 0.0071
Epoch 51: Train loss = 0.0138, Val loss = 0.0070
Epoch 52: Train loss = 0.0136, Val loss = 0.0069
Epoch 53: Train loss = 0.0136, Val loss = 0.0068
Epoch 54: Train loss = 0.0131, Val loss = 0.0067
Epoch 55: Train loss = 0.0131, Val loss = 0.0066
Epoch 56: Train loss = 0.0133, Val loss = 0.0066
Epoch 57: Train loss = 0.0130, Val loss = 0.0065
Epoch 58: Train loss = 0.0128, Val loss = 0.0065
Epoch 59: Train loss = 0.0127, Val loss = 0.0065
Epoch 60: Train loss = 0.0125, Val loss = 0.0064
Epoch 61: Train loss = 0.0124, Val loss = 0.0064
Epoch 62: Train loss = 0.0126, Val loss = 0.0064
Epoch 63: Train loss = 0.0123, Val loss = 0.0064
Epoch 64: Train loss = 0.0121, Val loss = 0.0063
Epoch 65: Train loss = 0.0120, Val loss = 0.0063
Epoch 66: Train loss = 0.0120, Val loss = 0.0063
Epoch 67: Train loss = 0.0119, Val loss = 0.0062
Epoch 68: Train loss = 0.0120, Val loss = 0.0062
Epoch 69: Train loss = 0.0119, Val loss = 0.0062
Epoch 70: Train loss = 0.0117, Val loss = 0.0062
Epoch 71: Train loss = 0.0116, Val loss = 0.0063
Epoch 72: Train loss = 0.0117, Val loss = 0.0062
Epoch 73: Train loss = 0.0115, Val loss = 0.0062
Epoch 74: Train loss = 0.0113, Val loss = 0.0062
Epoch 75: Train loss = 0.0113, Val loss = 0.0062
Epoch 76: Train loss = 0.0112, Val loss = 0.0062
Epoch 77: Train loss = 0.0114, Val loss = 0.0062
Early stopping triggered at epoch 77.
Using device: mps
Epoch 1: Train loss = 0.0575, Val loss = 0.0313
Epoch 2: Train loss = 0.0456, Val loss = 0.0236
Epoch 3: Train loss = 0.0372, Val loss = 0.0183
Epoch 4: Train loss = 0.0302, Val loss = 0.0140
Epoch 5: Train loss = 0.0248, Val loss = 0.0112
Epoch 6: Train loss = 0.0212, Val loss = 0.0090
Epoch 7: Train loss = 0.0181, Val loss = 0.0070
Epoch 8: Train loss = 0.0158, Val loss = 0.0059
Epoch 9: Train loss = 0.0143, Val loss = 0.0055
Epoch 10: Train loss = 0.0133, Val loss = 0.0052
Epoch 11: Train loss = 0.0126, Val loss = 0.0050
Epoch 12: Train loss = 0.0118, Val loss = 0.0052
Epoch 13: Train loss = 0.0116, Val loss = 0.0054
Epoch 14: Train loss = 0.0110, Val loss = 0.0054
Epoch 15: Train loss = 0.0108, Val loss = 0.0056
Epoch 16: Train loss = 0.0104, Val loss = 0.0058
Epoch 17: Train loss = 0.0101, Val loss = 0.0056
Epoch 18: Train loss = 0.0100, Val loss = 0.0058
Epoch 19: Train loss = 0.0098, Val loss = 0.0061
Epoch 20: Train loss = 0.0097, Val loss = 0.0059
Epoch 21: Train loss = 0.0094, Val loss = 0.0061
Epoch 22: Train loss = 0.0093, Val loss = 0.0060
Epoch 23: Train loss = 0.0091, Val loss = 0.0060
Epoch 24: Train loss = 0.0091, Val loss = 0.0061
Epoch 25: Train loss = 0.0089, Val loss = 0.0058
Epoch 26: Train loss = 0.0087, Val loss = 0.0065
Epoch 27: Train loss = 0.0085, Val loss = 0.0057
Epoch 28: Train loss = 0.0086, Val loss = 0.0071
Epoch 29: Train loss = 0.0084, Val loss = 0.0059
Epoch 30: Train loss = 0.0083, Val loss = 0.0060
Epoch 31: Train loss = 0.0082, Val loss = 0.0069
Epoch 32: Train loss = 0.0082, Val loss = 0.0058
Epoch 33: Train loss = 0.0081, Val loss = 0.0068
Epoch 34: Train loss = 0.0081, Val loss = 0.0061
Epoch 35: Train loss = 0.0079, Val loss = 0.0056
Epoch 36: Train loss = 0.0080, Val loss = 0.0066
Epoch 37: Train loss = 0.0078, Val loss = 0.0061
Epoch 38: Train loss = 0.0078, Val loss = 0.0061
Epoch 39: Train loss = 0.0077, Val loss = 0.0063
Epoch 40: Train loss = 0.0075, Val loss = 0.0065
Epoch 41: Train loss = 0.0076, Val loss = 0.0058
Epoch 42: Train loss = 0.0075, Val loss = 0.0062
Epoch 43: Train loss = 0.0074, Val loss = 0.0061
Epoch 44: Train loss = 0.0075, Val loss = 0.0063
Epoch 45: Train loss = 0.0074, Val loss = 0.0056
Epoch 46: Train loss = 0.0072, Val loss = 0.0065
Epoch 47: Train loss = 0.0073, Val loss = 0.0055
Epoch 48: Train loss = 0.0073, Val loss = 0.0061
Epoch 49: Train loss = 0.0073, Val loss = 0.0053
Epoch 50: Train loss = 0.0072, Val loss = 0.0064
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0595, Val loss = 0.0399
Epoch 2: Train loss = 0.0591, Val loss = 0.0394
Epoch 3: Train loss = 0.0585, Val loss = 0.0389
Epoch 4: Train loss = 0.0578, Val loss = 0.0384
Epoch 5: Train loss = 0.0574, Val loss = 0.0380
Epoch 6: Train loss = 0.0569, Val loss = 0.0375
Epoch 7: Train loss = 0.0562, Val loss = 0.0370
Epoch 8: Train loss = 0.0559, Val loss = 0.0366
Epoch 9: Train loss = 0.0554, Val loss = 0.0361
Epoch 10: Train loss = 0.0547, Val loss = 0.0357
Epoch 11: Train loss = 0.0542, Val loss = 0.0352
Epoch 12: Train loss = 0.0537, Val loss = 0.0347
Epoch 13: Train loss = 0.0531, Val loss = 0.0342
Epoch 14: Train loss = 0.0526, Val loss = 0.0337
Epoch 15: Train loss = 0.0520, Val loss = 0.0331
Epoch 16: Train loss = 0.0514, Val loss = 0.0325
Epoch 17: Train loss = 0.0507, Val loss = 0.0319
Epoch 18: Train loss = 0.0498, Val loss = 0.0312
Epoch 19: Train loss = 0.0493, Val loss = 0.0305
Epoch 20: Train loss = 0.0484, Val loss = 0.0297
Epoch 21: Train loss = 0.0478, Val loss = 0.0289
Epoch 22: Train loss = 0.0469, Val loss = 0.0281
Epoch 23: Train loss = 0.0459, Val loss = 0.0273
Epoch 24: Train loss = 0.0450, Val loss = 0.0265
Epoch 25: Train loss = 0.0440, Val loss = 0.0256
Epoch 26: Train loss = 0.0429, Val loss = 0.0247
Epoch 27: Train loss = 0.0421, Val loss = 0.0238
Epoch 28: Train loss = 0.0410, Val loss = 0.0229
Epoch 29: Train loss = 0.0400, Val loss = 0.0221
Epoch 30: Train loss = 0.0389, Val loss = 0.0212
Epoch 31: Train loss = 0.0374, Val loss = 0.0204
Epoch 32: Train loss = 0.0364, Val loss = 0.0196
Epoch 33: Train loss = 0.0352, Val loss = 0.0189
Epoch 34: Train loss = 0.0341, Val loss = 0.0182
Epoch 35: Train loss = 0.0330, Val loss = 0.0175
Epoch 36: Train loss = 0.0318, Val loss = 0.0168
Epoch 37: Train loss = 0.0309, Val loss = 0.0161
Epoch 38: Train loss = 0.0297, Val loss = 0.0154
Epoch 39: Train loss = 0.0285, Val loss = 0.0148
Epoch 40: Train loss = 0.0276, Val loss = 0.0141
Epoch 41: Train loss = 0.0265, Val loss = 0.0135
Epoch 42: Train loss = 0.0255, Val loss = 0.0128
Epoch 43: Train loss = 0.0243, Val loss = 0.0122
Epoch 44: Train loss = 0.0236, Val loss = 0.0116
Epoch 45: Train loss = 0.0226, Val loss = 0.0110
Epoch 46: Train loss = 0.0217, Val loss = 0.0104
Epoch 47: Train loss = 0.0214, Val loss = 0.0099
Epoch 48: Train loss = 0.0204, Val loss = 0.0094
Epoch 49: Train loss = 0.0197, Val loss = 0.0090
Epoch 50: Train loss = 0.0192, Val loss = 0.0086
Epoch 51: Train loss = 0.0187, Val loss = 0.0082
Epoch 52: Train loss = 0.0179, Val loss = 0.0078
Epoch 53: Train loss = 0.0177, Val loss = 0.0075
Epoch 54: Train loss = 0.0171, Val loss = 0.0072
Epoch 55: Train loss = 0.0168, Val loss = 0.0069
Epoch 56: Train loss = 0.0165, Val loss = 0.0066
Epoch 57: Train loss = 0.0162, Val loss = 0.0064
Epoch 58: Train loss = 0.0159, Val loss = 0.0062
Epoch 59: Train loss = 0.0155, Val loss = 0.0060
Epoch 60: Train loss = 0.0154, Val loss = 0.0058
Epoch 61: Train loss = 0.0148, Val loss = 0.0057
Epoch 62: Train loss = 0.0148, Val loss = 0.0056
Epoch 63: Train loss = 0.0146, Val loss = 0.0055
Epoch 64: Train loss = 0.0147, Val loss = 0.0055
Epoch 65: Train loss = 0.0141, Val loss = 0.0055
Epoch 66: Train loss = 0.0141, Val loss = 0.0055
Epoch 67: Train loss = 0.0138, Val loss = 0.0054
Epoch 68: Train loss = 0.0140, Val loss = 0.0054
Epoch 69: Train loss = 0.0137, Val loss = 0.0053
Epoch 70: Train loss = 0.0136, Val loss = 0.0053
Epoch 71: Train loss = 0.0136, Val loss = 0.0052
Epoch 72: Train loss = 0.0134, Val loss = 0.0052
Epoch 73: Train loss = 0.0135, Val loss = 0.0051
Epoch 74: Train loss = 0.0131, Val loss = 0.0051
Epoch 75: Train loss = 0.0130, Val loss = 0.0052
Epoch 76: Train loss = 0.0131, Val loss = 0.0052
Epoch 77: Train loss = 0.0130, Val loss = 0.0052
Epoch 78: Train loss = 0.0125, Val loss = 0.0051
Epoch 79: Train loss = 0.0127, Val loss = 0.0051
Epoch 80: Train loss = 0.0128, Val loss = 0.0050
Epoch 81: Train loss = 0.0126, Val loss = 0.0049
Epoch 82: Train loss = 0.0124, Val loss = 0.0050
Epoch 83: Train loss = 0.0123, Val loss = 0.0049
Epoch 84: Train loss = 0.0125, Val loss = 0.0049
Epoch 85: Train loss = 0.0123, Val loss = 0.0050
Epoch 86: Train loss = 0.0125, Val loss = 0.0049
Epoch 87: Train loss = 0.0123, Val loss = 0.0049
Epoch 88: Train loss = 0.0120, Val loss = 0.0050
Epoch 89: Train loss = 0.0120, Val loss = 0.0050
Epoch 90: Train loss = 0.0121, Val loss = 0.0050
Early stopping triggered at epoch 90.
Using device: mps
Epoch 1: Train loss = 0.0543, Val loss = 0.0325
Epoch 2: Train loss = 0.0492, Val loss = 0.0282
Epoch 3: Train loss = 0.0438, Val loss = 0.0237
Epoch 4: Train loss = 0.0379, Val loss = 0.0197
Epoch 5: Train loss = 0.0320, Val loss = 0.0164
Epoch 6: Train loss = 0.0268, Val loss = 0.0129
Epoch 7: Train loss = 0.0222, Val loss = 0.0093
Epoch 8: Train loss = 0.0186, Val loss = 0.0072
Epoch 9: Train loss = 0.0160, Val loss = 0.0067
Epoch 10: Train loss = 0.0143, Val loss = 0.0062
Epoch 11: Train loss = 0.0129, Val loss = 0.0049
Epoch 12: Train loss = 0.0124, Val loss = 0.0048
Epoch 13: Train loss = 0.0114, Val loss = 0.0055
Epoch 14: Train loss = 0.0110, Val loss = 0.0050
Epoch 15: Train loss = 0.0108, Val loss = 0.0046
Epoch 16: Train loss = 0.0104, Val loss = 0.0053
Epoch 17: Train loss = 0.0103, Val loss = 0.0053
Epoch 18: Train loss = 0.0097, Val loss = 0.0051
Epoch 19: Train loss = 0.0096, Val loss = 0.0059
Epoch 20: Train loss = 0.0096, Val loss = 0.0052
Epoch 21: Train loss = 0.0094, Val loss = 0.0049
Epoch 22: Train loss = 0.0093, Val loss = 0.0053
Epoch 23: Train loss = 0.0093, Val loss = 0.0050
Epoch 24: Train loss = 0.0088, Val loss = 0.0052
Epoch 25: Train loss = 0.0087, Val loss = 0.0047
Epoch 26: Train loss = 0.0088, Val loss = 0.0049
Epoch 27: Train loss = 0.0087, Val loss = 0.0045
Epoch 28: Train loss = 0.0088, Val loss = 0.0045
Epoch 29: Train loss = 0.0087, Val loss = 0.0057
Epoch 30: Train loss = 0.0085, Val loss = 0.0041
Epoch 31: Train loss = 0.0085, Val loss = 0.0053
Epoch 32: Train loss = 0.0082, Val loss = 0.0047
Epoch 33: Train loss = 0.0082, Val loss = 0.0053
Epoch 34: Train loss = 0.0081, Val loss = 0.0050
Epoch 35: Train loss = 0.0081, Val loss = 0.0046
Epoch 36: Train loss = 0.0078, Val loss = 0.0055
Epoch 37: Train loss = 0.0078, Val loss = 0.0047
Epoch 38: Train loss = 0.0079, Val loss = 0.0051
Epoch 39: Train loss = 0.0079, Val loss = 0.0048
Epoch 40: Train loss = 0.0077, Val loss = 0.0047
Epoch 41: Train loss = 0.0077, Val loss = 0.0048
Epoch 42: Train loss = 0.0076, Val loss = 0.0046
Epoch 43: Train loss = 0.0076, Val loss = 0.0055
Epoch 44: Train loss = 0.0075, Val loss = 0.0046
Epoch 45: Train loss = 0.0074, Val loss = 0.0057
Epoch 46: Train loss = 0.0073, Val loss = 0.0047
Epoch 47: Train loss = 0.0073, Val loss = 0.0056
Epoch 48: Train loss = 0.0073, Val loss = 0.0048
Epoch 49: Train loss = 0.0072, Val loss = 0.0056
Epoch 50: Train loss = 0.0072, Val loss = 0.0048
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0579, Val loss = 0.0383
Epoch 2: Train loss = 0.0573, Val loss = 0.0378
Epoch 3: Train loss = 0.0567, Val loss = 0.0373
Epoch 4: Train loss = 0.0562, Val loss = 0.0367
Epoch 5: Train loss = 0.0555, Val loss = 0.0362
Epoch 6: Train loss = 0.0550, Val loss = 0.0356
Epoch 7: Train loss = 0.0543, Val loss = 0.0349
Epoch 8: Train loss = 0.0537, Val loss = 0.0343
Epoch 9: Train loss = 0.0532, Val loss = 0.0337
Epoch 10: Train loss = 0.0524, Val loss = 0.0332
Epoch 11: Train loss = 0.0519, Val loss = 0.0326
Epoch 12: Train loss = 0.0513, Val loss = 0.0320
Epoch 13: Train loss = 0.0505, Val loss = 0.0314
Epoch 14: Train loss = 0.0497, Val loss = 0.0307
Epoch 15: Train loss = 0.0490, Val loss = 0.0299
Epoch 16: Train loss = 0.0482, Val loss = 0.0291
Epoch 17: Train loss = 0.0472, Val loss = 0.0282
Epoch 18: Train loss = 0.0463, Val loss = 0.0273
Epoch 19: Train loss = 0.0453, Val loss = 0.0263
Epoch 20: Train loss = 0.0441, Val loss = 0.0253
Epoch 21: Train loss = 0.0431, Val loss = 0.0241
Epoch 22: Train loss = 0.0416, Val loss = 0.0229
Epoch 23: Train loss = 0.0403, Val loss = 0.0217
Epoch 24: Train loss = 0.0387, Val loss = 0.0204
Epoch 25: Train loss = 0.0374, Val loss = 0.0192
Epoch 26: Train loss = 0.0358, Val loss = 0.0180
Epoch 27: Train loss = 0.0345, Val loss = 0.0168
Epoch 28: Train loss = 0.0328, Val loss = 0.0158
Epoch 29: Train loss = 0.0313, Val loss = 0.0149
Epoch 30: Train loss = 0.0299, Val loss = 0.0140
Epoch 31: Train loss = 0.0284, Val loss = 0.0133
Epoch 32: Train loss = 0.0274, Val loss = 0.0128
Epoch 33: Train loss = 0.0258, Val loss = 0.0123
Epoch 34: Train loss = 0.0251, Val loss = 0.0119
Epoch 35: Train loss = 0.0240, Val loss = 0.0116
Epoch 36: Train loss = 0.0230, Val loss = 0.0112
Epoch 37: Train loss = 0.0222, Val loss = 0.0109
Epoch 38: Train loss = 0.0215, Val loss = 0.0105
Epoch 39: Train loss = 0.0208, Val loss = 0.0102
Epoch 40: Train loss = 0.0204, Val loss = 0.0098
Epoch 41: Train loss = 0.0197, Val loss = 0.0095
Epoch 42: Train loss = 0.0191, Val loss = 0.0092
Epoch 43: Train loss = 0.0186, Val loss = 0.0090
Epoch 44: Train loss = 0.0182, Val loss = 0.0087
Epoch 45: Train loss = 0.0178, Val loss = 0.0084
Epoch 46: Train loss = 0.0175, Val loss = 0.0082
Epoch 47: Train loss = 0.0171, Val loss = 0.0081
Epoch 48: Train loss = 0.0167, Val loss = 0.0079
Epoch 49: Train loss = 0.0165, Val loss = 0.0078
Epoch 50: Train loss = 0.0162, Val loss = 0.0076
Epoch 51: Train loss = 0.0158, Val loss = 0.0075
Epoch 52: Train loss = 0.0158, Val loss = 0.0074
Epoch 53: Train loss = 0.0155, Val loss = 0.0072
Epoch 54: Train loss = 0.0151, Val loss = 0.0071
Epoch 55: Train loss = 0.0147, Val loss = 0.0069
Epoch 56: Train loss = 0.0147, Val loss = 0.0068
Epoch 57: Train loss = 0.0145, Val loss = 0.0066
Epoch 58: Train loss = 0.0143, Val loss = 0.0065
Epoch 59: Train loss = 0.0141, Val loss = 0.0065
Epoch 60: Train loss = 0.0141, Val loss = 0.0064
Epoch 61: Train loss = 0.0137, Val loss = 0.0063
Epoch 62: Train loss = 0.0136, Val loss = 0.0063
Epoch 63: Train loss = 0.0135, Val loss = 0.0063
Epoch 64: Train loss = 0.0136, Val loss = 0.0063
Epoch 65: Train loss = 0.0132, Val loss = 0.0063
Epoch 66: Train loss = 0.0133, Val loss = 0.0063
Epoch 67: Train loss = 0.0129, Val loss = 0.0063
Epoch 68: Train loss = 0.0127, Val loss = 0.0062
Epoch 69: Train loss = 0.0127, Val loss = 0.0061
Epoch 70: Train loss = 0.0126, Val loss = 0.0061
Epoch 71: Train loss = 0.0125, Val loss = 0.0061
Epoch 72: Train loss = 0.0125, Val loss = 0.0060
Epoch 73: Train loss = 0.0122, Val loss = 0.0060
Epoch 74: Train loss = 0.0121, Val loss = 0.0060
Epoch 75: Train loss = 0.0121, Val loss = 0.0060
Epoch 76: Train loss = 0.0122, Val loss = 0.0059
Epoch 77: Train loss = 0.0119, Val loss = 0.0059
Epoch 78: Train loss = 0.0120, Val loss = 0.0060
Epoch 79: Train loss = 0.0120, Val loss = 0.0060
Epoch 80: Train loss = 0.0119, Val loss = 0.0060
Epoch 81: Train loss = 0.0120, Val loss = 0.0060
Epoch 82: Train loss = 0.0116, Val loss = 0.0059
Epoch 83: Train loss = 0.0116, Val loss = 0.0059
Epoch 84: Train loss = 0.0114, Val loss = 0.0059
Epoch 85: Train loss = 0.0112, Val loss = 0.0058
Epoch 86: Train loss = 0.0115, Val loss = 0.0058
Epoch 87: Train loss = 0.0112, Val loss = 0.0058
Epoch 88: Train loss = 0.0114, Val loss = 0.0058
Epoch 89: Train loss = 0.0112, Val loss = 0.0058
Epoch 90: Train loss = 0.0111, Val loss = 0.0059
Epoch 91: Train loss = 0.0111, Val loss = 0.0059
Epoch 92: Train loss = 0.0108, Val loss = 0.0059
Epoch 93: Train loss = 0.0112, Val loss = 0.0058
Epoch 94: Train loss = 0.0111, Val loss = 0.0057
Epoch 95: Train loss = 0.0109, Val loss = 0.0057
Early stopping triggered at epoch 95.
Using device: mps
Epoch 1: Train loss = 0.0582, Val loss = 0.0356
Epoch 2: Train loss = 0.0527, Val loss = 0.0310
Epoch 3: Train loss = 0.0470, Val loss = 0.0251
Epoch 4: Train loss = 0.0396, Val loss = 0.0191
Epoch 5: Train loss = 0.0317, Val loss = 0.0158
Epoch 6: Train loss = 0.0260, Val loss = 0.0167
Epoch 7: Train loss = 0.0220, Val loss = 0.0130
Epoch 8: Train loss = 0.0177, Val loss = 0.0079
Epoch 9: Train loss = 0.0149, Val loss = 0.0060
Epoch 10: Train loss = 0.0139, Val loss = 0.0058
Epoch 11: Train loss = 0.0130, Val loss = 0.0062
Epoch 12: Train loss = 0.0121, Val loss = 0.0063
Epoch 13: Train loss = 0.0118, Val loss = 0.0056
Epoch 14: Train loss = 0.0109, Val loss = 0.0051
Epoch 15: Train loss = 0.0108, Val loss = 0.0050
Epoch 16: Train loss = 0.0105, Val loss = 0.0053
Epoch 17: Train loss = 0.0101, Val loss = 0.0056
Epoch 18: Train loss = 0.0100, Val loss = 0.0052
Epoch 19: Train loss = 0.0098, Val loss = 0.0046
Epoch 20: Train loss = 0.0095, Val loss = 0.0050
Epoch 21: Train loss = 0.0094, Val loss = 0.0054
Epoch 22: Train loss = 0.0095, Val loss = 0.0050
Epoch 23: Train loss = 0.0091, Val loss = 0.0050
Epoch 24: Train loss = 0.0091, Val loss = 0.0051
Epoch 25: Train loss = 0.0091, Val loss = 0.0051
Epoch 26: Train loss = 0.0088, Val loss = 0.0051
Epoch 27: Train loss = 0.0088, Val loss = 0.0051
Epoch 28: Train loss = 0.0089, Val loss = 0.0049
Epoch 29: Train loss = 0.0087, Val loss = 0.0048
Epoch 30: Train loss = 0.0086, Val loss = 0.0052
Epoch 31: Train loss = 0.0085, Val loss = 0.0048
Epoch 32: Train loss = 0.0084, Val loss = 0.0046
Epoch 33: Train loss = 0.0083, Val loss = 0.0057
Epoch 34: Train loss = 0.0082, Val loss = 0.0050
Epoch 35: Train loss = 0.0083, Val loss = 0.0048
Epoch 36: Train loss = 0.0083, Val loss = 0.0058
Epoch 37: Train loss = 0.0080, Val loss = 0.0049
Epoch 38: Train loss = 0.0080, Val loss = 0.0053
Epoch 39: Train loss = 0.0079, Val loss = 0.0057
Epoch 40: Train loss = 0.0080, Val loss = 0.0050
Epoch 41: Train loss = 0.0080, Val loss = 0.0057
Epoch 42: Train loss = 0.0079, Val loss = 0.0054
Epoch 43: Train loss = 0.0079, Val loss = 0.0051
Epoch 44: Train loss = 0.0077, Val loss = 0.0059
Epoch 45: Train loss = 0.0076, Val loss = 0.0052
Epoch 46: Train loss = 0.0077, Val loss = 0.0063
Epoch 47: Train loss = 0.0076, Val loss = 0.0053
Epoch 48: Train loss = 0.0077, Val loss = 0.0054
Epoch 49: Train loss = 0.0075, Val loss = 0.0058
Epoch 50: Train loss = 0.0074, Val loss = 0.0054
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0541, Val loss = 0.0316
Epoch 2: Train loss = 0.0498, Val loss = 0.0275
Epoch 3: Train loss = 0.0456, Val loss = 0.0235
Epoch 4: Train loss = 0.0411, Val loss = 0.0197
Epoch 5: Train loss = 0.0367, Val loss = 0.0160
Epoch 6: Train loss = 0.0320, Val loss = 0.0128
Epoch 7: Train loss = 0.0275, Val loss = 0.0101
Epoch 8: Train loss = 0.0232, Val loss = 0.0085
Epoch 9: Train loss = 0.0194, Val loss = 0.0081
Epoch 10: Train loss = 0.0165, Val loss = 0.0090
Epoch 11: Train loss = 0.0145, Val loss = 0.0106
Epoch 12: Train loss = 0.0132, Val loss = 0.0122
Epoch 13: Train loss = 0.0124, Val loss = 0.0129
Epoch 14: Train loss = 0.0117, Val loss = 0.0124
Epoch 15: Train loss = 0.0110, Val loss = 0.0112
Epoch 16: Train loss = 0.0103, Val loss = 0.0098
Epoch 17: Train loss = 0.0101, Val loss = 0.0085
Epoch 18: Train loss = 0.0098, Val loss = 0.0077
Epoch 19: Train loss = 0.0097, Val loss = 0.0073
Epoch 20: Train loss = 0.0096, Val loss = 0.0073
Epoch 21: Train loss = 0.0094, Val loss = 0.0075
Epoch 22: Train loss = 0.0093, Val loss = 0.0078
Epoch 23: Train loss = 0.0092, Val loss = 0.0080
Epoch 24: Train loss = 0.0090, Val loss = 0.0082
Epoch 25: Train loss = 0.0090, Val loss = 0.0083
Epoch 26: Train loss = 0.0089, Val loss = 0.0082
Epoch 27: Train loss = 0.0088, Val loss = 0.0081
Epoch 28: Train loss = 0.0088, Val loss = 0.0080
Epoch 29: Train loss = 0.0087, Val loss = 0.0080
Epoch 30: Train loss = 0.0087, Val loss = 0.0079
Epoch 31: Train loss = 0.0085, Val loss = 0.0079
Epoch 32: Train loss = 0.0085, Val loss = 0.0080
Epoch 33: Train loss = 0.0085, Val loss = 0.0081
Epoch 34: Train loss = 0.0083, Val loss = 0.0082
Epoch 35: Train loss = 0.0084, Val loss = 0.0082
Epoch 36: Train loss = 0.0084, Val loss = 0.0081
Epoch 37: Train loss = 0.0082, Val loss = 0.0080
Epoch 38: Train loss = 0.0083, Val loss = 0.0079
Epoch 39: Train loss = 0.0082, Val loss = 0.0081
Epoch 40: Train loss = 0.0082, Val loss = 0.0082
Epoch 41: Train loss = 0.0082, Val loss = 0.0083
Epoch 42: Train loss = 0.0082, Val loss = 0.0084
Epoch 43: Train loss = 0.0081, Val loss = 0.0082
Epoch 44: Train loss = 0.0082, Val loss = 0.0083
Epoch 45: Train loss = 0.0081, Val loss = 0.0085
Epoch 46: Train loss = 0.0080, Val loss = 0.0084
Epoch 47: Train loss = 0.0080, Val loss = 0.0082
Epoch 48: Train loss = 0.0080, Val loss = 0.0082
Epoch 49: Train loss = 0.0079, Val loss = 0.0082
Epoch 50: Train loss = 0.0078, Val loss = 0.0084
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0405, Val loss = 0.0092
Epoch 2: Train loss = 0.0177, Val loss = 0.0223
Epoch 3: Train loss = 0.0119, Val loss = 0.0064
Epoch 4: Train loss = 0.0108, Val loss = 0.0047
Epoch 5: Train loss = 0.0106, Val loss = 0.0068
Epoch 6: Train loss = 0.0091, Val loss = 0.0102
Epoch 7: Train loss = 0.0086, Val loss = 0.0064
Epoch 8: Train loss = 0.0082, Val loss = 0.0053
Epoch 9: Train loss = 0.0083, Val loss = 0.0078
Epoch 10: Train loss = 0.0081, Val loss = 0.0089
Epoch 11: Train loss = 0.0078, Val loss = 0.0069
Epoch 12: Train loss = 0.0076, Val loss = 0.0077
Epoch 13: Train loss = 0.0077, Val loss = 0.0091
Epoch 14: Train loss = 0.0075, Val loss = 0.0084
Epoch 15: Train loss = 0.0074, Val loss = 0.0082
Epoch 16: Train loss = 0.0073, Val loss = 0.0091
Epoch 17: Train loss = 0.0072, Val loss = 0.0085
Epoch 18: Train loss = 0.0072, Val loss = 0.0083
Epoch 19: Train loss = 0.0071, Val loss = 0.0086
Epoch 20: Train loss = 0.0070, Val loss = 0.0082
Epoch 21: Train loss = 0.0071, Val loss = 0.0084
Epoch 22: Train loss = 0.0070, Val loss = 0.0088
Epoch 23: Train loss = 0.0069, Val loss = 0.0072
Epoch 24: Train loss = 0.0069, Val loss = 0.0094
Epoch 25: Train loss = 0.0068, Val loss = 0.0079
Epoch 26: Train loss = 0.0068, Val loss = 0.0077
Epoch 27: Train loss = 0.0067, Val loss = 0.0090
Epoch 28: Train loss = 0.0066, Val loss = 0.0075
Epoch 29: Train loss = 0.0067, Val loss = 0.0086
Epoch 30: Train loss = 0.0066, Val loss = 0.0072
Epoch 31: Train loss = 0.0065, Val loss = 0.0080
Epoch 32: Train loss = 0.0064, Val loss = 0.0083
Epoch 33: Train loss = 0.0064, Val loss = 0.0082
Epoch 34: Train loss = 0.0064, Val loss = 0.0070
Epoch 35: Train loss = 0.0063, Val loss = 0.0086
Epoch 36: Train loss = 0.0064, Val loss = 0.0090
Epoch 37: Train loss = 0.0063, Val loss = 0.0073
Epoch 38: Train loss = 0.0062, Val loss = 0.0060
Epoch 39: Train loss = 0.0062, Val loss = 0.0055
Epoch 40: Train loss = 0.0062, Val loss = 0.0063
Epoch 41: Train loss = 0.0062, Val loss = 0.0072
Epoch 42: Train loss = 0.0060, Val loss = 0.0071
Epoch 43: Train loss = 0.0060, Val loss = 0.0063
Epoch 44: Train loss = 0.0060, Val loss = 0.0057
Epoch 45: Train loss = 0.0059, Val loss = 0.0051
Epoch 46: Train loss = 0.0061, Val loss = 0.0038
Epoch 47: Train loss = 0.0062, Val loss = 0.0071
Epoch 48: Train loss = 0.0060, Val loss = 0.0079
Epoch 49: Train loss = 0.0059, Val loss = 0.0055
Epoch 50: Train loss = 0.0058, Val loss = 0.0052
Epoch 51: Train loss = 0.0058, Val loss = 0.0062
Epoch 52: Train loss = 0.0057, Val loss = 0.0071
Epoch 53: Train loss = 0.0058, Val loss = 0.0061
Epoch 54: Train loss = 0.0055, Val loss = 0.0053
Epoch 55: Train loss = 0.0057, Val loss = 0.0058
Epoch 56: Train loss = 0.0057, Val loss = 0.0060
Early stopping triggered at epoch 56.
Using device: mps
Epoch 1: Train loss = 0.0472, Val loss = 0.0262
Epoch 2: Train loss = 0.0428, Val loss = 0.0222
Epoch 3: Train loss = 0.0385, Val loss = 0.0184
Epoch 4: Train loss = 0.0341, Val loss = 0.0147
Epoch 5: Train loss = 0.0295, Val loss = 0.0114
Epoch 6: Train loss = 0.0250, Val loss = 0.0087
Epoch 7: Train loss = 0.0209, Val loss = 0.0072
Epoch 8: Train loss = 0.0173, Val loss = 0.0074
Epoch 9: Train loss = 0.0147, Val loss = 0.0090
Epoch 10: Train loss = 0.0133, Val loss = 0.0111
Epoch 11: Train loss = 0.0126, Val loss = 0.0122
Epoch 12: Train loss = 0.0118, Val loss = 0.0120
Epoch 13: Train loss = 0.0110, Val loss = 0.0107
Epoch 14: Train loss = 0.0103, Val loss = 0.0091
Epoch 15: Train loss = 0.0099, Val loss = 0.0078
Epoch 16: Train loss = 0.0097, Val loss = 0.0069
Epoch 17: Train loss = 0.0094, Val loss = 0.0065
Epoch 18: Train loss = 0.0093, Val loss = 0.0066
Epoch 19: Train loss = 0.0093, Val loss = 0.0070
Epoch 20: Train loss = 0.0091, Val loss = 0.0075
Epoch 21: Train loss = 0.0090, Val loss = 0.0079
Epoch 22: Train loss = 0.0089, Val loss = 0.0080
Epoch 23: Train loss = 0.0088, Val loss = 0.0080
Epoch 24: Train loss = 0.0087, Val loss = 0.0077
Epoch 25: Train loss = 0.0086, Val loss = 0.0075
Epoch 26: Train loss = 0.0086, Val loss = 0.0073
Epoch 27: Train loss = 0.0085, Val loss = 0.0073
Epoch 28: Train loss = 0.0086, Val loss = 0.0075
Epoch 29: Train loss = 0.0084, Val loss = 0.0077
Epoch 30: Train loss = 0.0083, Val loss = 0.0079
Epoch 31: Train loss = 0.0082, Val loss = 0.0080
Epoch 32: Train loss = 0.0083, Val loss = 0.0080
Epoch 33: Train loss = 0.0081, Val loss = 0.0079
Epoch 34: Train loss = 0.0082, Val loss = 0.0079
Epoch 35: Train loss = 0.0082, Val loss = 0.0079
Epoch 36: Train loss = 0.0081, Val loss = 0.0080
Epoch 37: Train loss = 0.0080, Val loss = 0.0083
Epoch 38: Train loss = 0.0080, Val loss = 0.0084
Epoch 39: Train loss = 0.0079, Val loss = 0.0085
Epoch 40: Train loss = 0.0079, Val loss = 0.0085
Epoch 41: Train loss = 0.0080, Val loss = 0.0086
Epoch 42: Train loss = 0.0080, Val loss = 0.0086
Epoch 43: Train loss = 0.0079, Val loss = 0.0086
Epoch 44: Train loss = 0.0078, Val loss = 0.0085
Epoch 45: Train loss = 0.0077, Val loss = 0.0086
Epoch 46: Train loss = 0.0077, Val loss = 0.0087
Epoch 47: Train loss = 0.0078, Val loss = 0.0089
Epoch 48: Train loss = 0.0077, Val loss = 0.0090
Epoch 49: Train loss = 0.0077, Val loss = 0.0090
Epoch 50: Train loss = 0.0075, Val loss = 0.0090
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0385, Val loss = 0.0088
Epoch 2: Train loss = 0.0169, Val loss = 0.0212
Epoch 3: Train loss = 0.0113, Val loss = 0.0052
Epoch 4: Train loss = 0.0109, Val loss = 0.0052
Epoch 5: Train loss = 0.0100, Val loss = 0.0103
Epoch 6: Train loss = 0.0093, Val loss = 0.0106
Epoch 7: Train loss = 0.0085, Val loss = 0.0061
Epoch 8: Train loss = 0.0083, Val loss = 0.0060
Epoch 9: Train loss = 0.0080, Val loss = 0.0096
Epoch 10: Train loss = 0.0079, Val loss = 0.0086
Epoch 11: Train loss = 0.0078, Val loss = 0.0065
Epoch 12: Train loss = 0.0078, Val loss = 0.0083
Epoch 13: Train loss = 0.0076, Val loss = 0.0097
Epoch 14: Train loss = 0.0075, Val loss = 0.0079
Epoch 15: Train loss = 0.0073, Val loss = 0.0078
Epoch 16: Train loss = 0.0073, Val loss = 0.0096
Epoch 17: Train loss = 0.0073, Val loss = 0.0087
Epoch 18: Train loss = 0.0072, Val loss = 0.0081
Epoch 19: Train loss = 0.0072, Val loss = 0.0092
Epoch 20: Train loss = 0.0072, Val loss = 0.0089
Epoch 21: Train loss = 0.0072, Val loss = 0.0081
Epoch 22: Train loss = 0.0070, Val loss = 0.0088
Epoch 23: Train loss = 0.0069, Val loss = 0.0086
Epoch 24: Train loss = 0.0069, Val loss = 0.0082
Epoch 25: Train loss = 0.0068, Val loss = 0.0092
Epoch 26: Train loss = 0.0068, Val loss = 0.0080
Epoch 27: Train loss = 0.0067, Val loss = 0.0085
Epoch 28: Train loss = 0.0067, Val loss = 0.0081
Epoch 29: Train loss = 0.0065, Val loss = 0.0081
Epoch 30: Train loss = 0.0066, Val loss = 0.0086
Epoch 31: Train loss = 0.0066, Val loss = 0.0086
Epoch 32: Train loss = 0.0066, Val loss = 0.0068
Epoch 33: Train loss = 0.0065, Val loss = 0.0092
Epoch 34: Train loss = 0.0065, Val loss = 0.0093
Epoch 35: Train loss = 0.0064, Val loss = 0.0069
Epoch 36: Train loss = 0.0063, Val loss = 0.0082
Epoch 37: Train loss = 0.0063, Val loss = 0.0088
Epoch 38: Train loss = 0.0062, Val loss = 0.0077
Epoch 39: Train loss = 0.0062, Val loss = 0.0079
Epoch 40: Train loss = 0.0062, Val loss = 0.0085
Epoch 41: Train loss = 0.0061, Val loss = 0.0056
Epoch 42: Train loss = 0.0061, Val loss = 0.0058
Epoch 43: Train loss = 0.0062, Val loss = 0.0065
Epoch 44: Train loss = 0.0061, Val loss = 0.0090
Epoch 45: Train loss = 0.0062, Val loss = 0.0085
Epoch 46: Train loss = 0.0061, Val loss = 0.0062
Epoch 47: Train loss = 0.0060, Val loss = 0.0051
Epoch 48: Train loss = 0.0060, Val loss = 0.0045
Epoch 49: Train loss = 0.0060, Val loss = 0.0056
Epoch 50: Train loss = 0.0058, Val loss = 0.0060
Epoch 51: Train loss = 0.0058, Val loss = 0.0058
Epoch 52: Train loss = 0.0058, Val loss = 0.0054
Epoch 53: Train loss = 0.0058, Val loss = 0.0066
Epoch 54: Train loss = 0.0058, Val loss = 0.0051
Epoch 55: Train loss = 0.0058, Val loss = 0.0054
Epoch 56: Train loss = 0.0057, Val loss = 0.0054
Epoch 57: Train loss = 0.0056, Val loss = 0.0051
Epoch 58: Train loss = 0.0057, Val loss = 0.0046
Early stopping triggered at epoch 58.
Using device: mps
Epoch 1: Train loss = 0.0496, Val loss = 0.0306
Epoch 2: Train loss = 0.0484, Val loss = 0.0295
Epoch 3: Train loss = 0.0470, Val loss = 0.0282
Epoch 4: Train loss = 0.0456, Val loss = 0.0268
Epoch 5: Train loss = 0.0441, Val loss = 0.0253
Epoch 6: Train loss = 0.0423, Val loss = 0.0237
Epoch 7: Train loss = 0.0404, Val loss = 0.0218
Epoch 8: Train loss = 0.0383, Val loss = 0.0197
Epoch 9: Train loss = 0.0357, Val loss = 0.0175
Epoch 10: Train loss = 0.0327, Val loss = 0.0151
Epoch 11: Train loss = 0.0295, Val loss = 0.0125
Epoch 12: Train loss = 0.0259, Val loss = 0.0102
Epoch 13: Train loss = 0.0220, Val loss = 0.0086
Epoch 14: Train loss = 0.0186, Val loss = 0.0083
Epoch 15: Train loss = 0.0160, Val loss = 0.0098
Epoch 16: Train loss = 0.0144, Val loss = 0.0118
Epoch 17: Train loss = 0.0134, Val loss = 0.0127
Epoch 18: Train loss = 0.0125, Val loss = 0.0114
Epoch 19: Train loss = 0.0113, Val loss = 0.0093
Epoch 20: Train loss = 0.0104, Val loss = 0.0074
Epoch 21: Train loss = 0.0098, Val loss = 0.0062
Epoch 22: Train loss = 0.0095, Val loss = 0.0058
Epoch 23: Train loss = 0.0093, Val loss = 0.0058
Epoch 24: Train loss = 0.0093, Val loss = 0.0063
Epoch 25: Train loss = 0.0092, Val loss = 0.0068
Epoch 26: Train loss = 0.0090, Val loss = 0.0073
Epoch 27: Train loss = 0.0091, Val loss = 0.0074
Epoch 28: Train loss = 0.0090, Val loss = 0.0073
Epoch 29: Train loss = 0.0089, Val loss = 0.0070
Epoch 30: Train loss = 0.0087, Val loss = 0.0067
Epoch 31: Train loss = 0.0086, Val loss = 0.0066
Epoch 32: Train loss = 0.0086, Val loss = 0.0066
Epoch 33: Train loss = 0.0085, Val loss = 0.0067
Epoch 34: Train loss = 0.0084, Val loss = 0.0069
Epoch 35: Train loss = 0.0084, Val loss = 0.0070
Epoch 36: Train loss = 0.0084, Val loss = 0.0069
Epoch 37: Train loss = 0.0082, Val loss = 0.0068
Epoch 38: Train loss = 0.0084, Val loss = 0.0067
Epoch 39: Train loss = 0.0082, Val loss = 0.0068
Epoch 40: Train loss = 0.0082, Val loss = 0.0068
Epoch 41: Train loss = 0.0082, Val loss = 0.0067
Epoch 42: Train loss = 0.0082, Val loss = 0.0068
Epoch 43: Train loss = 0.0081, Val loss = 0.0069
Epoch 44: Train loss = 0.0081, Val loss = 0.0071
Epoch 45: Train loss = 0.0082, Val loss = 0.0071
Epoch 46: Train loss = 0.0081, Val loss = 0.0070
Epoch 47: Train loss = 0.0080, Val loss = 0.0070
Epoch 48: Train loss = 0.0080, Val loss = 0.0070
Epoch 49: Train loss = 0.0079, Val loss = 0.0072
Epoch 50: Train loss = 0.0080, Val loss = 0.0072
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0493, Val loss = 0.0224
Epoch 2: Train loss = 0.0308, Val loss = 0.0161
Epoch 3: Train loss = 0.0170, Val loss = 0.0130
Epoch 4: Train loss = 0.0101, Val loss = 0.0045
Epoch 5: Train loss = 0.0108, Val loss = 0.0063
Epoch 6: Train loss = 0.0100, Val loss = 0.0095
Epoch 7: Train loss = 0.0088, Val loss = 0.0061
Epoch 8: Train loss = 0.0082, Val loss = 0.0053
Epoch 9: Train loss = 0.0080, Val loss = 0.0080
Epoch 10: Train loss = 0.0078, Val loss = 0.0069
Epoch 11: Train loss = 0.0077, Val loss = 0.0058
Epoch 12: Train loss = 0.0076, Val loss = 0.0077
Epoch 13: Train loss = 0.0075, Val loss = 0.0068
Epoch 14: Train loss = 0.0074, Val loss = 0.0063
Epoch 15: Train loss = 0.0073, Val loss = 0.0078
Epoch 16: Train loss = 0.0073, Val loss = 0.0068
Epoch 17: Train loss = 0.0072, Val loss = 0.0076
Epoch 18: Train loss = 0.0071, Val loss = 0.0072
Epoch 19: Train loss = 0.0072, Val loss = 0.0072
Epoch 20: Train loss = 0.0071, Val loss = 0.0070
Epoch 21: Train loss = 0.0071, Val loss = 0.0067
Epoch 22: Train loss = 0.0070, Val loss = 0.0072
Epoch 23: Train loss = 0.0070, Val loss = 0.0066
Epoch 24: Train loss = 0.0070, Val loss = 0.0071
Epoch 25: Train loss = 0.0069, Val loss = 0.0066
Epoch 26: Train loss = 0.0068, Val loss = 0.0078
Epoch 27: Train loss = 0.0069, Val loss = 0.0061
Epoch 28: Train loss = 0.0068, Val loss = 0.0084
Epoch 29: Train loss = 0.0068, Val loss = 0.0058
Epoch 30: Train loss = 0.0068, Val loss = 0.0082
Epoch 31: Train loss = 0.0067, Val loss = 0.0062
Epoch 32: Train loss = 0.0067, Val loss = 0.0066
Epoch 33: Train loss = 0.0066, Val loss = 0.0077
Epoch 34: Train loss = 0.0065, Val loss = 0.0065
Epoch 35: Train loss = 0.0066, Val loss = 0.0056
Epoch 36: Train loss = 0.0065, Val loss = 0.0077
Epoch 37: Train loss = 0.0066, Val loss = 0.0082
Epoch 38: Train loss = 0.0065, Val loss = 0.0068
Epoch 39: Train loss = 0.0064, Val loss = 0.0059
Epoch 40: Train loss = 0.0063, Val loss = 0.0097
Epoch 41: Train loss = 0.0064, Val loss = 0.0072
Epoch 42: Train loss = 0.0065, Val loss = 0.0045
Epoch 43: Train loss = 0.0064, Val loss = 0.0070
Epoch 44: Train loss = 0.0062, Val loss = 0.0078
Epoch 45: Train loss = 0.0062, Val loss = 0.0049
Epoch 46: Train loss = 0.0062, Val loss = 0.0057
Epoch 47: Train loss = 0.0061, Val loss = 0.0075
Epoch 48: Train loss = 0.0061, Val loss = 0.0071
Epoch 49: Train loss = 0.0062, Val loss = 0.0062
Epoch 50: Train loss = 0.0060, Val loss = 0.0044
Epoch 51: Train loss = 0.0061, Val loss = 0.0048
Epoch 52: Train loss = 0.0059, Val loss = 0.0053
Epoch 53: Train loss = 0.0059, Val loss = 0.0062
Epoch 54: Train loss = 0.0060, Val loss = 0.0082
Epoch 55: Train loss = 0.0060, Val loss = 0.0056
Epoch 56: Train loss = 0.0058, Val loss = 0.0055
Epoch 57: Train loss = 0.0058, Val loss = 0.0046
Epoch 58: Train loss = 0.0057, Val loss = 0.0054
Epoch 59: Train loss = 0.0057, Val loss = 0.0056
Epoch 60: Train loss = 0.0056, Val loss = 0.0059
Early stopping triggered at epoch 60.
Using device: mps
Epoch 1: Train loss = 0.0522, Val loss = 0.0327
Epoch 2: Train loss = 0.0510, Val loss = 0.0316
Epoch 3: Train loss = 0.0497, Val loss = 0.0305
Epoch 4: Train loss = 0.0483, Val loss = 0.0291
Epoch 5: Train loss = 0.0468, Val loss = 0.0276
Epoch 6: Train loss = 0.0449, Val loss = 0.0258
Epoch 7: Train loss = 0.0428, Val loss = 0.0237
Epoch 8: Train loss = 0.0403, Val loss = 0.0212
Epoch 9: Train loss = 0.0374, Val loss = 0.0185
Epoch 10: Train loss = 0.0342, Val loss = 0.0156
Epoch 11: Train loss = 0.0304, Val loss = 0.0127
Epoch 12: Train loss = 0.0262, Val loss = 0.0103
Epoch 13: Train loss = 0.0221, Val loss = 0.0089
Epoch 14: Train loss = 0.0186, Val loss = 0.0091
Epoch 15: Train loss = 0.0159, Val loss = 0.0109
Epoch 16: Train loss = 0.0144, Val loss = 0.0124
Epoch 17: Train loss = 0.0132, Val loss = 0.0121
Epoch 18: Train loss = 0.0119, Val loss = 0.0104
Epoch 19: Train loss = 0.0108, Val loss = 0.0083
Epoch 20: Train loss = 0.0100, Val loss = 0.0068
Epoch 21: Train loss = 0.0097, Val loss = 0.0060
Epoch 22: Train loss = 0.0096, Val loss = 0.0059
Epoch 23: Train loss = 0.0095, Val loss = 0.0061
Epoch 24: Train loss = 0.0093, Val loss = 0.0066
Epoch 25: Train loss = 0.0092, Val loss = 0.0072
Epoch 26: Train loss = 0.0090, Val loss = 0.0076
Epoch 27: Train loss = 0.0089, Val loss = 0.0075
Epoch 28: Train loss = 0.0088, Val loss = 0.0073
Epoch 29: Train loss = 0.0088, Val loss = 0.0070
Epoch 30: Train loss = 0.0087, Val loss = 0.0067
Epoch 31: Train loss = 0.0085, Val loss = 0.0066
Epoch 32: Train loss = 0.0086, Val loss = 0.0067
Epoch 33: Train loss = 0.0085, Val loss = 0.0068
Epoch 34: Train loss = 0.0086, Val loss = 0.0069
Epoch 35: Train loss = 0.0084, Val loss = 0.0068
Epoch 36: Train loss = 0.0084, Val loss = 0.0067
Epoch 37: Train loss = 0.0082, Val loss = 0.0068
Epoch 38: Train loss = 0.0083, Val loss = 0.0068
Epoch 39: Train loss = 0.0082, Val loss = 0.0067
Epoch 40: Train loss = 0.0083, Val loss = 0.0067
Epoch 41: Train loss = 0.0082, Val loss = 0.0068
Epoch 42: Train loss = 0.0081, Val loss = 0.0069
Epoch 43: Train loss = 0.0081, Val loss = 0.0069
Epoch 44: Train loss = 0.0081, Val loss = 0.0070
Epoch 45: Train loss = 0.0081, Val loss = 0.0070
Epoch 46: Train loss = 0.0080, Val loss = 0.0070
Epoch 47: Train loss = 0.0081, Val loss = 0.0070
Epoch 48: Train loss = 0.0080, Val loss = 0.0070
Epoch 49: Train loss = 0.0080, Val loss = 0.0071
Epoch 50: Train loss = 0.0079, Val loss = 0.0072
Early stopping triggered at epoch 50.
Using device: mps
Epoch 1: Train loss = 0.0472, Val loss = 0.0209
Epoch 2: Train loss = 0.0293, Val loss = 0.0109
Epoch 3: Train loss = 0.0176, Val loss = 0.0152
Epoch 4: Train loss = 0.0103, Val loss = 0.0036
Epoch 5: Train loss = 0.0111, Val loss = 0.0043
Epoch 6: Train loss = 0.0102, Val loss = 0.0091
Epoch 7: Train loss = 0.0096, Val loss = 0.0077
Epoch 8: Train loss = 0.0083, Val loss = 0.0047
Epoch 9: Train loss = 0.0082, Val loss = 0.0054
Epoch 10: Train loss = 0.0079, Val loss = 0.0081
Epoch 11: Train loss = 0.0078, Val loss = 0.0069
Epoch 12: Train loss = 0.0078, Val loss = 0.0057
Epoch 13: Train loss = 0.0077, Val loss = 0.0065
Epoch 14: Train loss = 0.0075, Val loss = 0.0078
Epoch 15: Train loss = 0.0075, Val loss = 0.0067
Epoch 16: Train loss = 0.0073, Val loss = 0.0066
Epoch 17: Train loss = 0.0073, Val loss = 0.0082
Epoch 18: Train loss = 0.0073, Val loss = 0.0071
Epoch 19: Train loss = 0.0072, Val loss = 0.0071
Epoch 20: Train loss = 0.0072, Val loss = 0.0076
Epoch 21: Train loss = 0.0071, Val loss = 0.0072
Epoch 22: Train loss = 0.0071, Val loss = 0.0072
Epoch 23: Train loss = 0.0071, Val loss = 0.0075
Epoch 24: Train loss = 0.0071, Val loss = 0.0075
Epoch 25: Train loss = 0.0071, Val loss = 0.0070
Epoch 26: Train loss = 0.0070, Val loss = 0.0074
Epoch 27: Train loss = 0.0069, Val loss = 0.0068
Epoch 28: Train loss = 0.0069, Val loss = 0.0068
Epoch 29: Train loss = 0.0068, Val loss = 0.0075
Epoch 30: Train loss = 0.0069, Val loss = 0.0069
Epoch 31: Train loss = 0.0067, Val loss = 0.0071
Epoch 32: Train loss = 0.0067, Val loss = 0.0065
Epoch 33: Train loss = 0.0066, Val loss = 0.0077
Epoch 34: Train loss = 0.0067, Val loss = 0.0065
Epoch 35: Train loss = 0.0066, Val loss = 0.0061
Epoch 36: Train loss = 0.0065, Val loss = 0.0090
Epoch 37: Train loss = 0.0065, Val loss = 0.0060
Epoch 38: Train loss = 0.0065, Val loss = 0.0062
Epoch 39: Train loss = 0.0064, Val loss = 0.0075
Epoch 40: Train loss = 0.0064, Val loss = 0.0072
Epoch 41: Train loss = 0.0063, Val loss = 0.0065
Epoch 42: Train loss = 0.0062, Val loss = 0.0063
Epoch 43: Train loss = 0.0063, Val loss = 0.0086
Epoch 44: Train loss = 0.0063, Val loss = 0.0054
Epoch 45: Train loss = 0.0062, Val loss = 0.0054
Epoch 46: Train loss = 0.0062, Val loss = 0.0066
Epoch 47: Train loss = 0.0061, Val loss = 0.0075
Epoch 48: Train loss = 0.0061, Val loss = 0.0047
Epoch 49: Train loss = 0.0061, Val loss = 0.0047
Epoch 50: Train loss = 0.0060, Val loss = 0.0042
Early stopping triggered at epoch 50.

Best parameters found in single holdout:
  batch_size: 512
  dropout: 0.1
  hidden_dim: 256
  hidden_layers: 4
  l2_weight: 0.0005
  lr: 0.0005

Results for the best model from single hold-out evaluation:

--- Task 1 ---
1 day(s) MAE (log-var)             : 0.64061763
1 day(s) RMSE (log-var)            : 0.83228683
1 day(s) R2 (log-var)              : -0.03610080
1 day(s) MAE (var)                 : 2.49631354
1 day(s) RMSE (var)                : 8.65006150
1 day(s) R2 (var)                  : -0.00173153
1 day(s) QLIKE (var)               : 0.49636718
5 day(s) MAE (log-var)             : 0.65466516
5 day(s) RMSE (log-var)            : 0.85062716
5 day(s) R2 (log-var)              : -0.04632316
5 day(s) MAE (var)                 : 2.63176205
5 day(s) RMSE (var)                : 8.95934072
5 day(s) R2 (var)                  : -0.00450938
5 day(s) QLIKE (var)               : 0.52235639
10 day(s) MAE (log-var)            : 0.66138330
10 day(s) RMSE (log-var)           : 0.85546546
10 day(s) R2 (log-var)             : -0.04766021
10 day(s) MAE (var)                : 2.66861418
10 day(s) RMSE (var)               : 9.00214806
10 day(s) R2 (var)                 : -0.00578935
10 day(s) QLIKE (var)              : 0.52905687
20 day(s) MAE (log-var)            : 0.66353974
20 day(s) RMSE (log-var)           : 0.85419605
20 day(s) R2 (log-var)             : -0.03253761
20 day(s) MAE (var)                : 2.68673372
20 day(s) RMSE (var)               : 9.00735573
20 day(s) R2 (var)                 : -0.00267009
20 day(s) QLIKE (var)              : 0.52306397
full horizon MAE (log-var)         : 0.66376878
full horizon RMSE (log-var)        : 0.85178716
full horizon R2 (log-var)          : -0.01547248
full horizon MAE (var)             : 2.70255373
full horizon RMSE (var)            : 9.00769311
full horizon R2 (var)              : 0.00272291
full horizon QLIKE (var)           : 0.51362617

Best single-holdout model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_OHO_SMLP_model.pkl

All tasks EURUSD with Vanilla MLP without CV¶

In [ ]:
import os

# Destination for the pickled best model produced by this single-fit run.
save_test_SMLP_model_9_EURUSD_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_SMLP_model_9_EURUSD.pkl"
)

# Hyper-parameters for a single train/val/test fit (no nested CV) of a
# vanilla MLP on the eur2 EURUSD feature set.
# NOTE(review): min_epochs=90 exceeds epochs=50, so the early-stopping
# machinery (patience=10) can never fire in this configuration — confirm
# this is intentional and not a leftover from a longer run.
_t9_eur2_mlp_kwargs = dict(
    model_type="Simple_MLP",
    X_price=eur2_X_price,
    X_time=eur2_X_time,
    y=eur2_y,
    no_tasks=eur2_y.shape[2],   # one task per target feature
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    save_model_path=save_test_SMLP_model_9_EURUSD_file_path,
    lr=1e-3,
    epochs=50,
    batch_size=32,
    verbose=True,
    time_horizon=28,
    dropout=0,
    l2_weight=1e-5,
    patience=10,
    min_epochs=90,
    hidden_dim=256,
    hidden_layers=3,
)

t9_EURUSD_MLP_results, t9_EURUSD_MLP_nested_results, t9_EURUSD_MLP_best_model, t9_EURUSD_MLP_best_params, _ = train_and_evaluate_model(
    **_t9_eur2_mlp_kwargs
)

All tasks EURUSD 3 with Vanilla MLP without CV¶

In [ ]:
import os

# BUG FIX: this cell previously saved to "test_SMLP_model_9_EURUSD.pkl" —
# the exact file the eur2 cell above writes — so running both cells silently
# overwrote the eur2 model on disk. Save the eur3 run under its own name.
save_test_SMLP_model_9_EURUSD_3_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_SMLP_model_9_EURUSD_3.pkl"
)

# Single train/val/test fit (no nested CV) of a vanilla MLP on the eur3
# EURUSD feature set.
# NOTE(review): the result variables below reuse the t9_EURUSD_MLP_* names
# from the eur2 cell and shadow its outputs — rename if both runs' results
# are needed simultaneously.
# NOTE(review): min_epochs=90 exceeds epochs=50, so early stopping
# (patience=10) can never trigger here — confirm intended.
t9_EURUSD_MLP_results, t9_EURUSD_MLP_nested_results, t9_EURUSD_MLP_best_model, t9_EURUSD_MLP_best_params, _ = train_and_evaluate_model(
    model_type="Simple_MLP",
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,
    no_tasks=eur3_y.shape[2],   # one task per target feature
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    save_model_path=save_test_SMLP_model_9_EURUSD_3_file_path,
    lr=5e-4,
    epochs=50,
    batch_size=128,
    verbose=True,
    time_horizon=28,
    dropout=0,
    l2_weight=1e-5,
    patience=10,
    min_epochs=90,
    hidden_dim=64,
    hidden_layers=2
)
Using device: mps
Switching model.loss_type → 'mse' for log_var_ratio training (NLL expects absolute variance).
Batch size for y: 3782
Time steps for y: 28
Features for y: 8

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking y_train_core (log_var_ratio scaled):
Shape: (2723, 28, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.305190246584762
  Min value:  -4.314240572680862
Checking X_price_val:
Shape: (302, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.864238163413908
  Min value:  0.00022460106458896233
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.1249999999999998
  Min value:  0.0
Checking y_val (log_var_ratio scaled):
Shape: (302, 28, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3172909297315214
  Min value:  -3.6501912507837058
Checking X_price_test:
Shape: (757, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.9835594561318155
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4999999999999996
  Min value:  0.0
Checking y_test (log_var_ratio scaled):
Shape: (757, 28, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2742605055227685
  Min value:  -4.387187839607449

Size of the X data for training (core): (2723, 120)
Size of the Y data for training (core): (2723, 224)
Epoch 1: Train mse = 1.0712 | Val mse = 0.6699
Epoch 2: Train mse = 1.0192 | Val mse = 0.6591
Epoch 3: Train mse = 1.0088 | Val mse = 0.6530
Epoch 4: Train mse = 1.0028 | Val mse = 0.6516
Epoch 5: Train mse = 1.0005 | Val mse = 0.6493
Epoch 6: Train mse = 0.9976 | Val mse = 0.6463
Epoch 7: Train mse = 0.9930 | Val mse = 0.6396
Epoch 8: Train mse = 0.9857 | Val mse = 0.6292
Epoch 9: Train mse = 0.9763 | Val mse = 0.6106
Epoch 10: Train mse = 0.9603 | Val mse = 0.5837
Epoch 11: Train mse = 0.9201 | Val mse = 0.5625
Epoch 12: Train mse = 0.8633 | Val mse = 0.5451
Epoch 13: Train mse = 0.8196 | Val mse = 0.5403
Epoch 14: Train mse = 0.7944 | Val mse = 0.5307
Epoch 15: Train mse = 0.7813 | Val mse = 0.5341
Epoch 16: Train mse = 0.7725 | Val mse = 0.5412
Epoch 17: Train mse = 0.7675 | Val mse = 0.5470
Epoch 18: Train mse = 0.7629 | Val mse = 0.5582
Epoch 19: Train mse = 0.7587 | Val mse = 0.5686
Epoch 20: Train mse = 0.7549 | Val mse = 0.5796
Epoch 21: Train mse = 0.7514 | Val mse = 0.5903
Epoch 22: Train mse = 0.7483 | Val mse = 0.5990
Epoch 23: Train mse = 0.7455 | Val mse = 0.6058
Epoch 24: Train mse = 0.7428 | Val mse = 0.6109
Epoch 25: Train mse = 0.7403 | Val mse = 0.6142
Epoch 26: Train mse = 0.7378 | Val mse = 0.6162
Epoch 27: Train mse = 0.7356 | Val mse = 0.6172
Epoch 28: Train mse = 0.7336 | Val mse = 0.6175
Epoch 29: Train mse = 0.7319 | Val mse = 0.6172
Epoch 30: Train mse = 0.7304 | Val mse = 0.6164
Epoch 31: Train mse = 0.7292 | Val mse = 0.6153
Epoch 32: Train mse = 0.7282 | Val mse = 0.6143
Epoch 33: Train mse = 0.7274 | Val mse = 0.6135
Epoch 34: Train mse = 0.7268 | Val mse = 0.6132
Epoch 35: Train mse = 0.7262 | Val mse = 0.6137
Epoch 36: Train mse = 0.7256 | Val mse = 0.6148
Epoch 37: Train mse = 0.7251 | Val mse = 0.6163
Epoch 38: Train mse = 0.7245 | Val mse = 0.6181
Epoch 39: Train mse = 0.7240 | Val mse = 0.6200
Epoch 40: Train mse = 0.7234 | Val mse = 0.6218
Epoch 41: Train mse = 0.7228 | Val mse = 0.6233
Epoch 42: Train mse = 0.7222 | Val mse = 0.6247
Epoch 43: Train mse = 0.7216 | Val mse = 0.6258
Epoch 44: Train mse = 0.7209 | Val mse = 0.6267
Epoch 45: Train mse = 0.7201 | Val mse = 0.6274
Epoch 46: Train mse = 0.7193 | Val mse = 0.6279
Epoch 47: Train mse = 0.7185 | Val mse = 0.6281
Epoch 48: Train mse = 0.7176 | Val mse = 0.6282
Epoch 49: Train mse = 0.7167 | Val mse = 0.6281
Epoch 50: Train mse = 0.7157 | Val mse = 0.6279

Parameters used in the single-fit model:
input_dim: 120
output_dim: 224
dropout: 0.00000000
lr: 0.00050000
epochs: 50
batch_size: 128
device: mps
verbose: True
hidden_layers: 2
no_tasks: 8
l2_weight: 0.00001000
patience: 10
min_epochs: 90
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11103872
1 day(s) RMSE                      : 0.20864335
1 day(s) R2                        : 0.04625683
1 day(s) Pearson r                 : 0.34357118
1 day(s) QLIKE                     : 0.51659101
3 day(s) MAE                       : 0.11042431
3 day(s) RMSE                      : 0.20581183
3 day(s) R2                        : 0.06480831
3 day(s) Pearson r                 : 0.33792193
3 day(s) QLIKE                     : 0.51526911
5 day(s) MAE                       : 0.11063432
5 day(s) RMSE                      : 0.20497981
5 day(s) R2                        : 0.06181290
5 day(s) Pearson r                 : 0.32561392
5 day(s) QLIKE                     : 0.51706068
10 day(s) MAE                      : 0.11042366
10 day(s) RMSE                     : 0.20422895
10 day(s) R2                       : 0.05978366
10 day(s) Pearson r                : 0.31826269
10 day(s) QLIKE                    : 0.51751716
20 day(s) MAE                      : 0.11071115
20 day(s) RMSE                     : 0.20385819
20 day(s) R2                       : 0.05214553
20 day(s) Pearson r                : 0.30587741
20 day(s) QLIKE                    : 0.51985509
full horizon MAE                   : 0.11133031
full horizon RMSE                  : 0.20428137
full horizon R2                    : 0.04304657
full horizon Pearson r             : 0.28814428
full horizon QLIKE                 : 0.52567919

--- Task 2 ---
1 day(s) MAE                       : 0.15539526
1 day(s) RMSE                      : 0.31164278
1 day(s) R2                        : -0.33090319
1 day(s) Pearson r                 : 0.01163244
1 day(s) QLIKE                     : 12.98528923
3 day(s) MAE                       : 0.15421316
3 day(s) RMSE                      : 0.30966925
3 day(s) R2                        : -0.32977651
3 day(s) Pearson r                 : 0.02741066
3 day(s) QLIKE                     : 13.01252850
5 day(s) MAE                       : 0.15378779
5 day(s) RMSE                      : 0.30888711
5 day(s) R2                        : -0.32957251
5 day(s) Pearson r                 : 0.01242597
5 day(s) QLIKE                     : 13.14369696
10 day(s) MAE                      : 0.15330539
10 day(s) RMSE                     : 0.30815422
10 day(s) R2                       : -0.32890168
10 day(s) Pearson r                : 0.03218002
10 day(s) QLIKE                    : 13.08815369
20 day(s) MAE                      : 0.15307454
20 day(s) RMSE                     : 0.30788331
20 day(s) R2                       : -0.32835322
20 day(s) Pearson r                : 0.03486894
20 day(s) QLIKE                    : 13.12220077
full horizon MAE                   : 0.15358063
full horizon RMSE                  : 0.30866619
full horizon R2                    : -0.32901815
full horizon Pearson r             : 0.02788881
full horizon QLIKE                 : 13.11606246

--- Task 3 ---
1 day(s) MAE                       : 0.06343678
1 day(s) RMSE                      : 0.08439328
1 day(s) R2                        : 0.00216887
1 day(s) Pearson r                 : 0.71092168
1 day(s) QLIKE                     : 0.06096452
3 day(s) MAE                       : 0.06091535
3 day(s) RMSE                      : 0.08097772
3 day(s) R2                        : 0.07894655
3 day(s) Pearson r                 : 0.71231073
3 day(s) QLIKE                     : 0.06334039
5 day(s) MAE                       : 0.06434223
5 day(s) RMSE                      : 0.08967931
5 day(s) R2                        : -0.12704834
5 day(s) Pearson r                 : 0.70459164
5 day(s) QLIKE                     : 0.06194658
10 day(s) MAE                      : 0.06365845
10 day(s) RMSE                     : 0.08605711
10 day(s) R2                       : -0.03351570
10 day(s) Pearson r                : 0.66658640
10 day(s) QLIKE                    : 0.06121039
20 day(s) MAE                      : 0.06133690
20 day(s) RMSE                     : 0.08162220
20 day(s) R2                       : 0.05503045
20 day(s) Pearson r                : 0.67642336
20 day(s) QLIKE                    : 0.05986425
full horizon MAE                   : 0.06164279
full horizon RMSE                  : 0.08202875
full horizon R2                    : 0.03792841
full horizon Pearson r             : 0.66917059
full horizon QLIKE                 : 0.05879267

--- Task 4 ---
1 day(s) MAE                       : 0.03453525
1 day(s) RMSE                      : 0.05623033
1 day(s) R2                        : -0.60508741
1 day(s) Pearson r                 : -0.10135682
1 day(s) QLIKE                     : 11.11405374
3 day(s) MAE                       : 0.03452383
3 day(s) RMSE                      : 0.05621810
3 day(s) R2                        : -0.60398405
3 day(s) Pearson r                 : -0.08810974
3 day(s) QLIKE                     : 11.40786087
5 day(s) MAE                       : 0.03449000
5 day(s) RMSE                      : 0.05621482
5 day(s) R2                        : -0.60271880
5 day(s) Pearson r                 : -0.07085644
5 day(s) QLIKE                     : 11.29629285
10 day(s) MAE                      : 0.03441563
10 day(s) RMSE                     : 0.05619619
10 day(s) R2                       : -0.59947874
10 day(s) Pearson r                : -0.06286938
10 day(s) QLIKE                    : 11.35737413
20 day(s) MAE                      : 0.03437502
20 day(s) RMSE                     : 0.05618612
20 day(s) R2                       : -0.59781729
20 day(s) Pearson r                : -0.04927722
20 day(s) QLIKE                    : 11.14983119
full horizon MAE                   : 0.03432559
full horizon RMSE                  : 0.05617418
full horizon R2                    : -0.59557058
full horizon Pearson r             : -0.04121754
full horizon QLIKE                 : 11.19202595

--- Task 5 ---
1 day(s) MAE                       : 0.69546400
1 day(s) RMSE                      : 0.76492033
1 day(s) R2                        : -3.33174581
1 day(s) Pearson r                 : 0.08997483
1 day(s) QLIKE                     : 3.42706487
3 day(s) MAE                       : 0.69218298
3 day(s) RMSE                      : 0.76131513
3 day(s) R2                        : -3.29055973
3 day(s) Pearson r                 : 0.07644863
3 day(s) QLIKE                     : 3.45252049
5 day(s) MAE                       : 0.68458022
5 day(s) RMSE                      : 0.75450730
5 day(s) R2                        : -3.21320529
5 day(s) Pearson r                 : 0.08190575
5 day(s) QLIKE                     : 3.45892221
10 day(s) MAE                      : 0.69400294
10 day(s) RMSE                     : 0.76486142
10 day(s) R2                       : -3.32740476
10 day(s) Pearson r                : 0.07096972
10 day(s) QLIKE                    : 3.51356565
20 day(s) MAE                      : 0.69670907
20 day(s) RMSE                     : 0.76766275
20 day(s) R2                       : -3.35741105
20 day(s) Pearson r                : 0.05621275
20 day(s) QLIKE                    : 3.50296607
full horizon MAE                   : 0.69462232
full horizon RMSE                  : 0.76490433
full horizon R2                    : -3.32401319
full horizon Pearson r             : 0.03779874
full horizon QLIKE                 : 3.52336656

--- Task 6 ---
1 day(s) MAE                       : 0.03310339
1 day(s) RMSE                      : 0.06223379
1 day(s) R2                        : -0.38629635
1 day(s) Pearson r                 : -0.17633517
1 day(s) QLIKE                     : 4.16885653
3 day(s) MAE                       : 0.03312630
3 day(s) RMSE                      : 0.06222116
3 day(s) R2                        : -0.38552510
3 day(s) Pearson r                 : -0.15250340
3 day(s) QLIKE                     : 4.25629915
5 day(s) MAE                       : 0.03305612
5 day(s) RMSE                      : 0.06220861
5 day(s) R2                        : -0.38459070
5 day(s) Pearson r                 : -0.16008457
5 day(s) QLIKE                     : 4.36238227
10 day(s) MAE                      : 0.03319670
10 day(s) RMSE                     : 0.06231751
10 day(s) R2                       : -0.38857092
10 day(s) Pearson r                : -0.12495025
10 day(s) QLIKE                    : 4.30631520
20 day(s) MAE                      : 0.03302093
20 day(s) RMSE                     : 0.06226365
20 day(s) R2                       : -0.38531604
20 day(s) Pearson r                : -0.12767741
20 day(s) QLIKE                    : 4.27389322
full horizon MAE                   : 0.03288570
full horizon RMSE                  : 0.06222517
full horizon R2                    : -0.38260683
full horizon Pearson r             : -0.12637777
full horizon QLIKE                 : 4.24863318

--- Task 7 ---
1 day(s) MAE                       : 0.00512763
1 day(s) RMSE                      : 0.00780973
1 day(s) R2                        : -0.75770887
1 day(s) Pearson r                 : 0.17016662
1 day(s) QLIKE                     : 9.07038862
3 day(s) MAE                       : 0.00512744
3 day(s) RMSE                      : 0.00780943
3 day(s) R2                        : -0.75757006
3 day(s) Pearson r                 : 0.10004566
3 day(s) QLIKE                     : 10.31558587
5 day(s) MAE                       : 0.00512751
5 day(s) RMSE                      : 0.00780954
5 day(s) R2                        : -0.75762192
5 day(s) Pearson r                 : 0.08203973
5 day(s) QLIKE                     : 10.04213929
10 day(s) MAE                      : 0.00512755
10 day(s) RMSE                     : 0.00780961
10 day(s) R2                       : -0.75765181
10 day(s) Pearson r                : 0.07528033
10 day(s) QLIKE                    : 9.72162298
20 day(s) MAE                      : 0.00513558
20 day(s) RMSE                     : 0.00781228
20 day(s) R2                       : -0.76093509
20 day(s) Pearson r                : 0.07207377
20 day(s) QLIKE                    : 9.64707149
full horizon MAE                   : 0.00515839
full horizon RMSE                  : 0.00782259
full horizon R2                    : -0.76935270
full horizon Pearson r             : 0.06373753
full horizon QLIKE                 : 9.65553754

--- Task 8 ---
1 day(s) MAE                       : 2.49077294
1 day(s) RMSE                      : 3.36139940
1 day(s) R2                        : -6.39456952
1 day(s) Pearson r                 : 0.25720817
1 day(s) QLIKE                     : 0.05153811
3 day(s) MAE                       : 2.42889177
3 day(s) RMSE                      : 3.41054160
3 day(s) R2                        : -6.60967509
3 day(s) Pearson r                 : 0.22954922
3 day(s) QLIKE                     : 0.05917651
5 day(s) MAE                       : 2.57341971
5 day(s) RMSE                      : 3.52759259
5 day(s) R2                        : -7.13666040
5 day(s) Pearson r                 : 0.23911820
5 day(s) QLIKE                     : 0.06120540
10 day(s) MAE                      : 2.65922680
10 day(s) RMSE                     : 3.60099697
10 day(s) R2                       : -7.46081003
10 day(s) Pearson r                : 0.22169162
10 day(s) QLIKE                    : 0.06317712
20 day(s) MAE                      : 2.79918060
20 day(s) RMSE                     : 3.74973350
20 day(s) R2                       : -8.17319960
20 day(s) Pearson r                : 0.19941223
20 day(s) QLIKE                    : 0.06600200
full horizon MAE                   : 2.87952960
full horizon RMSE                  : 3.81439699
full horizon R2                    : -8.50153808
full horizon Pearson r             : 0.19067444
full horizon QLIKE                 : 0.06677623

Test 3 tasks Vanilla MLP with CV¶

In [ ]:
import os

# Checkpoint file so an interrupted nested-CV run can resume
# (the grid below sets resume=[True]).
save_checkpoint_SMLP_model_3_CV_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "checkpoint_SMLP_model_3_CV.pkl"
)

# Destination for the final fitted model.
save_test_SMLP_model_3_CV_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_SMLP_model_3_CV.pkl"
)

# Hyper-parameter grid searched in the inner CV loop.
# NOTE(review): the grid pins epochs=[3] while the call below passes
# epochs=150 — presumably the grid overrides the call-level value during CV;
# confirm against train_and_evaluate_model.
param_grid_test_SMLP_3_CV_aapl_cv = {
    "dropout": [0.2],
    "lr": [3e-5],
    "batch_size": [128, 256],
    "epochs": [3],
    "resume": [True],
    "verbose": [True]
}

# Nested CV (2 outer x 2 inner folds) on the AAPL data with a simple MLP,
# 3 prediction tasks; inputs are flattened and price/time features merged.
t3CV_SMLP_results, t3CV_SMLP_nested_results, t3CV_SMLP_best_model, t3CV_SMLP_best_params = train_and_evaluate_model(
    model_type="Simple_MLP",
    X_price=aapl_X_price,
    X_time=aapl_X_time,
    y=aapl_y,
    no_tasks=3,
    use_nested_cv=True,
    flatten=True,
    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    save_model_path=save_test_SMLP_model_3_CV_file_path,
    lr=3e-6,
    epochs=150,
    batch_size=64,
    verbose=True,
    hidden_layers=21,
    outer_folds=2,
    inner_folds=2,
    param_grid=param_grid_test_SMLP_3_CV_aapl_cv,
    checkpoint_path= save_checkpoint_SMLP_model_3_CV_file_path
)

LSTM models¶

Vanilla LSTM¶

Vanilla LSTM architecture¶

In [121]:
import torch
import torch.nn as nn

class LSTMSeq2Seq(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_layers = 2, dropout=0.0, pred_len=28, hidden_dim=64, no_tasks=1):
        super().__init__()
        self.encoder_layers = hidden_layers
        self.decoder_layers = hidden_layers
        self.pred_len = pred_len
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim


        enc_dec_dropout = dropout if hidden_layers > 1 else 0.0

        self.encoder = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.encoder_layers,
            bias=True,
            batch_first=True,
            dropout=enc_dec_dropout,
            bidirectional=False,
            proj_size=0,
        )

        self.decoder = nn.LSTM(
            input_size=self.output_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.decoder_layers,
            bias=True,
            batch_first=True,
            dropout=enc_dec_dropout,
            bidirectional=False,
            proj_size=0,
        )

        
        self.start_proj = nn.Linear(self.hidden_dim, self.output_dim)

        self.fc = nn.Linear(self.hidden_dim, self.output_dim)

 
        self._sched_mode = None         
        self._sched_start = 1.0
        self._sched_end = 0.0
        self._sched_steps = 1
        self._sched_step = 0
        self.teacher_forcing_ratio = 1.0 
        self._tf_floor = 1e-6


    def set_tf_schedule(self, start: float = 1.0, end: float = 0.0, steps: int = 1000, mode: str = "linear"):
        self._sched_mode = mode
        self._sched_start = float(start)
        self._sched_end = float(end)
        self._sched_steps = max(int(steps), 1)
        self._sched_step = 0
        self.teacher_forcing_ratio = self._sched_start

    def disable_tf_schedule(self, ratio: float = 1.0):
        self._sched_mode = None
        self.teacher_forcing_ratio = float(ratio)

    def _advance_tf_ratio(self):
        if self._sched_mode is None:
            return
        t = min(self._sched_step / max(self._sched_steps - 1, 1), 1.0)
        if self._sched_mode == "linear":
            r = self._sched_start + t * (self._sched_end - self._sched_start)
        elif self._sched_mode == "exp":
            start = max(self._sched_start, self._tf_floor)
            end = max(self._sched_end, self._tf_floor)
            r = start * ((end / start) ** t)
        else:
            r = self.teacher_forcing_ratio
        self.teacher_forcing_ratio = float(min(max(r, 0.0), 1.0))
        self._sched_step += 1


    def forward(self, x, y=None, teacher_forcing=True):
        B = x.size(0)

       
        _, (h, c) = self.encoder(x)       
        h_dec, c_dec = h, c

        
        z = h[-1]                           
        dec_in = self.start_proj(z).unsqueeze(1)  

        outs = []

 
        if self.training and teacher_forcing:
            self._advance_tf_ratio()
            tf_ratio = self.teacher_forcing_ratio
        else:
            tf_ratio = 0.0  

        for t in range(self.pred_len):
            out, (h_dec, c_dec) = self.decoder(dec_in, (h_dec, c_dec))  
            pred = self.fc(out)                                        
            outs.append(pred)

            
            use_tf = (
                teacher_forcing
                and self.training
                and (y is not None)
                and (t < y.size(1))
                and (torch.rand((), device=x.device) < tf_ratio)
            )
            dec_in = y[:, t:t+1] if use_tf else pred

        return torch.cat(outs, dim=1)

Vanilla LSTM wrapper¶

In [122]:
from sklearn.base import BaseEstimator, RegressorMixin
import torch
import torch.nn as nn


class SimpleLSTMWrapper(BaseEstimator, RegressorMixin):
    """sklearn-style wrapper that trains an LSTMSeq2Seq model.

    fit() runs mini-batch training with teacher forcing, computes a
    validation loss on (X_test, y_test) every epoch, and early-stops with
    best-weight restore.  predict() decodes autoregressively (no teacher
    forcing).  Loss can be plain MSE or a variance NLL (Gaussian or
    Student-t) where predictions are interpreted as log-variances.
    """

    def __init__(
        self,
        input_dim=10,
        output_dim=1,
        dropout=0.2,
        lr=1e-3,
        epochs=50,
        batch_size=512,
        device=None,
        verbose=True,
        hidden_layers=3,
        hidden_dim=64,
        no_tasks=1,
        pred_len=28,
        l2_weight=1e-4,
        patience=10,
        min_epochs=30,
        min_delta=1e-4,

        loss_type="gauss_nll_var",  # "mse", "gauss_nll_var", or "student_t_nll_var"
        target_is_logvar=True,  # if True, y holds log-variance and is exp()'d for NLL
        nll_eps=1e-12,  # floor on the variance target to avoid log(0)/div-by-0
        clamp_logvar_min=-20.0,  # clamp range for predicted log-variance
        clamp_logvar_max=20.0,
        student_df=5.0,  # degrees of freedom for the Student-t NLL
    ):
        self.input_dim = int(input_dim)
        self.output_dim = int(output_dim)
        self.dropout = float(dropout)
        self.lr = float(lr)
        self.epochs = int(epochs)
        self.batch_size = int(batch_size)
        # Device preference: Apple MPS, then CUDA, then CPU.
        self.device = device or (
            "mps" if torch.backends.mps.is_available()
            else "cuda" if torch.cuda.is_available()
            else "cpu"
        )
        self.verbose = bool(verbose)
        self.hidden_layers = int(hidden_layers)
        self.no_tasks = int(no_tasks)
        self.pred_len = int(pred_len)
        self.l2_weight = float(l2_weight)
        self.hidden_dim = int(hidden_dim)
        self.patience = int(patience)
        self.min_epochs = int(min_epochs)
        self.min_delta = float(min_delta)

        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)

        # Build the network/optimizer eagerly so the estimator is usable
        # immediately after construction.
        self._build()
        if self.verbose:
            print("Using device:", self.device)

    def _build(self):
        """(Re)create the model and optimizer from the current hyper-params."""
        self.model = LSTMSeq2Seq(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            hidden_layers=self.hidden_layers,
            dropout=self.dropout,
            pred_len=self.pred_len,
            hidden_dim=self.hidden_dim,
            no_tasks=self.no_tasks,
        ).to(self.device)

        # AdamW: weight decay applied decoupled from the gradient update.
        self.optimizer = torch.optim.AdamW(
            self.model.parameters(), lr=self.lr, weight_decay=self.l2_weight
        )
        self._mse = nn.MSELoss()

    def _compute_loss(self, y_hat, y_true):
        """Loss for predictions y_hat against targets y_true.

        For the NLL losses, y_hat is interpreted as a predicted
        log-variance z, and y_true as the realized variance target
        (exp()'d first when target_is_logvar).
        """
        if self.loss_type == "mse":
            return self._mse(y_hat, y_true)

        # Clamp predicted log-variance so exp() stays finite.
        z = torch.clamp(y_hat, self.clamp_logvar_min, self.clamp_logvar_max)

        # Convert targets to variances v.
        if self.target_is_logvar:
            v = torch.exp(y_true)
        else:
            v = y_true
        v = torch.clamp(v, min=self.nll_eps)

        if self.loss_type == "gauss_nll_var":
            # Gaussian NLL up to additive constants: v * exp(-z) + z.
            loss = v * torch.exp(-z) + z
            return loss.mean()

        if self.loss_type == "student_t_nll_var":
            # Student-t NLL up to additive constants with df = student_df.
            nu = torch.tensor(self.student_df, device=z.device, dtype=z.dtype)
            loss = 0.5 * (nu + 1.0) * torch.log1p(v / (nu * torch.exp(z))) + 0.5 * z
            return loss.mean()

        raise ValueError(f"Unknown loss_type: {self.loss_type}")

    def fit(self, X, y, X_test, y_test):
        """Train with early stopping; (X_test, y_test) act as validation.

        NOTE(review): this deviates from the sklearn fit(X, y) signature;
        the extra arguments are required by the surrounding pipeline.
        Expects y of shape (N, pred_len, output_dim).
        """
        if y.ndim != 3:
            raise ValueError(f"y must be 3D (N, pred_len, output_dim); got {y.shape}")
        if y.shape[1] != self.pred_len:
            raise ValueError(f"y.shape[1] ({y.shape[1]}) must equal pred_len ({self.pred_len})")
        if y.shape[2] != self.output_dim:
            raise ValueError(f"y.shape[2] ({y.shape[2]}) must equal output_dim ({self.output_dim})")

        # Move everything to the training device up front.
        X = torch.tensor(X, dtype=torch.float32, device=self.device)
        y = torch.tensor(y, dtype=torch.float32, device=self.device)
        X_val = torch.tensor(X_test, dtype=torch.float32, device=self.device)
        y_val = torch.tensor(y_test, dtype=torch.float32, device=self.device)

        loader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(X, y),
            batch_size=self.batch_size,
            shuffle=True,
        )

        # Early-stopping bookkeeping: best validation loss, best weights
        # (kept on CPU), epochs without improvement.
        best_val = float("inf")
        best_state = None
        no_imp = 0

        for epoch in range(1, self.epochs + 1):

            # --- training pass (teacher forcing enabled) ---
            self.model.train()
            tr_loss = 0.0
            nb = 0
            for xb, yb in loader:
                self.optimizer.zero_grad(set_to_none=True)
                preds = self.model(xb, yb, teacher_forcing=True)
                loss = self._compute_loss(preds, yb)
                loss.backward()
                self.optimizer.step()
                tr_loss += float(loss.detach().cpu())
                nb += 1
            tr_loss /= max(1, nb)

            # --- validation pass (pure autoregressive decoding) ---
            self.model.eval()
            with torch.no_grad():
                val_preds = self.model(X_val, teacher_forcing=False)
                vloss = float(self._compute_loss(val_preds, y_val).detach().cpu())

            if self.verbose:
                print(f"Epoch {epoch}: Train {self.loss_type} = {tr_loss:.4f} | Val {self.loss_type} = {vloss:.4f}")

            # Improvement must exceed min_delta to reset patience.
            if (best_val - vloss) > self.min_delta:
                best_val = vloss
                no_imp = 0
                best_state = {k: v.detach().cpu() for k, v in self.model.state_dict().items()}
            else:
                no_imp += 1

            # Early stop only once min_epochs have elapsed.
            if no_imp >= self.patience and epoch >= self.min_epochs:
                if self.verbose:
                    print(f"Early stopping triggered at epoch {epoch}.")
                break

        # Restore the best-validation weights seen during training.
        if best_state is not None:
            self.model.load_state_dict(best_state)

        return self

    def predict(self, X):
        """Autoregressive predictions as a numpy array (N, pred_len, output_dim)."""
        self.model.eval()
        X = torch.tensor(X, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            preds = self.model(X, teacher_forcing=False)
        return preds.detach().cpu().numpy()

    def get_params(self, deep=True):
        """All constructor hyper-parameters (sklearn clone/grid-search support)."""
        return dict(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            hidden_layers=self.hidden_layers,
            dropout=self.dropout,
            lr=self.lr,
            epochs=self.epochs,
            batch_size=self.batch_size,
            device=self.device,
            verbose=self.verbose,
            no_tasks=self.no_tasks,
            pred_len=self.pred_len,
            l2_weight=self.l2_weight,
            hidden_dim=self.hidden_dim,
            patience=self.patience,
            min_epochs=self.min_epochs,
            min_delta=self.min_delta,

            loss_type=self.loss_type,
            target_is_logvar=self.target_is_logvar,
            nll_eps=self.nll_eps,
            clamp_logvar_min=self.clamp_logvar_min,
            clamp_logvar_max=self.clamp_logvar_max,
            student_df=self.student_df,
        )

    def set_params(self, **params):
        """Update hyper-parameters; rebuild the network if architecture changed.

        NOTE(review): unknown keys are silently ignored (sklearn's
        convention is to raise) — verify callers rely on this leniency.
        """
        # Changing any of these requires reconstructing model + optimizer.
        arch_keys = {
            "input_dim", "output_dim", "hidden_layers", "hidden_dim",
            "dropout", "no_tasks", "pred_len", "device"
        }
        need_rebuild = any(k in arch_keys for k in params)

        for k, v in params.items():
            if hasattr(self, k):
                setattr(self, k, v)

        if need_rebuild:
            self._build()
        else:
            # Push lr / weight-decay changes into the existing optimizer.
            for g in self.optimizer.param_groups:
                g["lr"] = self.lr
                g["weight_decay"] = self.l2_weight

        return self

Test 1 task EURUSD with Vanilla LSTM without CV¶

In [359]:
import os
# Destination file for the 1-task EURUSD vanilla-LSTM model.
save_test_EURUSD_SLSTM_model_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_SLSTM_model_1_task.pkl"
)

# Single-horizon (time_horizon=1) EURUSD run, plain train/test split (no CV);
# sequences are kept 3D (flatten=False) and price/time inputs stay separate.
t1_EURUSD_SLSTM_results, t1_EURUSD_SLSTM_nested_results, t1_EURUSD_SLSTM_best_model, t1_EURUSD_SLSTM_best_params, _ = train_and_evaluate_model(
    model_type="Simple_LSTM",
    X_price=eur2_X_price,
    X_time=eur2_X_time,
    y=eur2_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test_EURUSD_SLSTM_model_1_file_path,
    lr=5e-4,
    epochs=50,
    batch_size=16,
    verbose=True,
    hidden_layers=2,
    time_horizon=1,
    l2_weight= 1e-6,
    hidden_dim=32
)
Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0000000000000002
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_var_ratio scaled):
Shape: (2723, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.1619931883384433
  Min value:  -3.7675739443866982
Checking X_price_val:
Shape: (302, 60, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7917474776.440305
  Min value:  -0.007013492710980488
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_var_ratio scaled):
Shape: (302, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.9457568816440924
  Min value:  -3.7365559140878837
Checking X_price_test:
Shape: (757, 60, 8)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4141668678880164
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_var_ratio scaled):
Shape: (757, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.229980689853926
  Min value:  -4.487665162496176

Size of the X data for training (core): (2723, 60, 8)
Size of the Y data for training (core): (2723, 1, 1)
Epoch 1: Train MSE = 0.9493 | Val MSE = 0.7655
Epoch 2: Train MSE = 0.8263 | Val MSE = 0.7812
Epoch 3: Train MSE = 0.7749 | Val MSE = 0.7782
Epoch 4: Train MSE = 0.7608 | Val MSE = 0.8438
Epoch 5: Train MSE = 0.7569 | Val MSE = 0.8543
Epoch 6: Train MSE = 0.7416 | Val MSE = 0.8343
Epoch 7: Train MSE = 0.7488 | Val MSE = 0.8421
Epoch 8: Train MSE = 0.7583 | Val MSE = 0.8486
Epoch 9: Train MSE = 0.7380 | Val MSE = 0.8175
Epoch 10: Train MSE = 0.7381 | Val MSE = 0.8412
Epoch 11: Train MSE = 0.7326 | Val MSE = 0.8615
Epoch 12: Train MSE = 0.7414 | Val MSE = 0.8658
Epoch 13: Train MSE = 0.7321 | Val MSE = 0.8298
Epoch 14: Train MSE = 0.7423 | Val MSE = 0.8705
Epoch 15: Train MSE = 0.7351 | Val MSE = 0.8571
Epoch 16: Train MSE = 0.7291 | Val MSE = 0.8576
Epoch 17: Train MSE = 0.7367 | Val MSE = 0.8892
Epoch 18: Train MSE = 0.7251 | Val MSE = 0.8759
Epoch 19: Train MSE = 0.7269 | Val MSE = 0.8966
Epoch 20: Train MSE = 0.7357 | Val MSE = 0.9217
Epoch 21: Train MSE = 0.7407 | Val MSE = 0.8192
Epoch 22: Train MSE = 0.7213 | Val MSE = 0.8646
Epoch 23: Train MSE = 0.7246 | Val MSE = 0.8391
Epoch 24: Train MSE = 0.7232 | Val MSE = 0.8585
Epoch 25: Train MSE = 0.7198 | Val MSE = 0.8758
Epoch 26: Train MSE = 0.7225 | Val MSE = 0.8609
Epoch 27: Train MSE = 0.7313 | Val MSE = 0.8423
Epoch 28: Train MSE = 0.7170 | Val MSE = 0.8404
Epoch 29: Train MSE = 0.7143 | Val MSE = 0.8295
Epoch 30: Train MSE = 0.7323 | Val MSE = 0.8930
Epoch 31: Train MSE = 0.7175 | Val MSE = 0.8645
Epoch 32: Train MSE = 0.7158 | Val MSE = 0.9109
Epoch 33: Train MSE = 0.7194 | Val MSE = 0.8513
Epoch 34: Train MSE = 0.7177 | Val MSE = 0.8409
Epoch 35: Train MSE = 0.7151 | Val MSE = 0.9039
Epoch 36: Train MSE = 0.7193 | Val MSE = 0.8874
Epoch 37: Train MSE = 0.7151 | Val MSE = 0.8886
Epoch 38: Train MSE = 0.7181 | Val MSE = 0.8565
Epoch 39: Train MSE = 0.7115 | Val MSE = 0.8742
Epoch 40: Train MSE = 0.7122 | Val MSE = 0.9150
Epoch 41: Train MSE = 0.7452 | Val MSE = 0.8346
Epoch 42: Train MSE = 0.7596 | Val MSE = 0.7501
Epoch 43: Train MSE = 0.7878 | Val MSE = 0.8221
Epoch 44: Train MSE = 0.7534 | Val MSE = 0.8296
Epoch 45: Train MSE = 0.7414 | Val MSE = 0.9046
Epoch 46: Train MSE = 0.7357 | Val MSE = 0.8727
Epoch 47: Train MSE = 0.7797 | Val MSE = 0.8792
Epoch 48: Train MSE = 0.7537 | Val MSE = 0.8038
Epoch 49: Train MSE = 0.7428 | Val MSE = 0.8976
Epoch 50: Train MSE = 0.7305 | Val MSE = 0.8743

Parameters used in the single-fit model:
input_dim: 8
output_dim: 1
hidden_layers: 2
dropout: 0.00000000
lr: 0.00050000
epochs: 50
batch_size: 16
device: mps
verbose: True
no_tasks: 1
pred_len: 1
l2_weight: 0.00000100
hidden_dim: 32
patience: 10
min_epochs: 50
min_delta: 0.00010000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.12991513
1 day(s) RMSE                      : 0.21536798
1 day(s) R2                        : -0.01621268
1 day(s) Pearson r                 : 0.38450392
1 day(s) QLIKE                     : 0.56875954
full horizon MAE                   : 0.12991513
full horizon RMSE                  : 0.21536798
full horizon R2                    : -0.01621268
full horizon Pearson r             : 0.38450392
full horizon QLIKE                 : 0.56875954

Test 3 task EURUSD with Vanilla LSTM without CV¶

In [ ]:
import os
# Destination file for the 28-day EURUSD vanilla-LSTM model.
save_test3_EURUSD_SLSTM_model_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_SLSTM_model_1_task.pkl"
)

# 28-day-horizon EURUSD run with the vanilla LSTM (no CV).
# FIX: the original call passed `min_epochs` twice (min_epochs=30 and a
# trailing min_epochs=10), which is a SyntaxError in Python — the cell could
# never execute. A single value (30, grouped with the other early-stopping
# settings) is kept.
t3_EURUSD_SLSTM_results, t3_EURUSD_SLSTM_nested_results, t3_EURUSD_SLSTM_best_model, t3_EURUSD_SLSTM_best_params, _ = train_and_evaluate_model(
    model_type="Simple_LSTM",
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test3_EURUSD_SLSTM_model_1_file_path,
    lr=3e-3,
    epochs=50,
    batch_size=8,
    verbose=True,
    hidden_layers=2,
    time_horizon=28,
    l2_weight= 1e-6,
    hidden_dim=32,
    patience=10,
    min_epochs=30,
    min_delta=1e-4,
    target_mode="log"
)

Vanilla LSTM with AAPL4¶

In [789]:
import os
# FIX: this cell trains on the AAPL (appl4_*) data but previously saved to
# "test_EURUSD_SLSTM_model_1_task.pkl", silently overwriting the EURUSD model
# written by the earlier cell (the run log confirms the collision). The AAPL
# run now gets its own file.
# NOTE(review): the t3_EURUSD_* result names are kept for backward
# compatibility, but they also shadow the EURUSD results from the previous
# cell — consider renaming in a follow-up.
save_test3_EURUSD_SLSTM_model_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_AAPL4_SLSTM_model_1_task.pkl"
)

# Short smoke run (epochs=2) on AAPL4 with a 60-step horizon, log-MSE target.
t3_EURUSD_SLSTM_results, t3_EURUSD_SLSTM_nested_results, t3_EURUSD_SLSTM_best_model, t3_EURUSD_SLSTM_best_params, yy = train_and_evaluate_model(
    model_type="Simple_LSTM",
    X_price=appl4_X_price,
    X_time=appl4_X_time,
    y=appl4_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test3_EURUSD_SLSTM_model_1_file_path,
    lr=1e-3,
    epochs=2,
    batch_size=16,
    verbose=True,
    hidden_layers=3,
    time_horizon=60,
    l2_weight= 1e-5,
    hidden_dim=16,
    patience=5,
    min_epochs=10,
    target_mode="log_mse",
    )
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 60
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_mse scaled):
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.23813116832301
  Min value:  -2.4157720513117233
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_mse scaled):
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9216334253580323
  Min value:  -2.1781197603632063
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_mse scaled):
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.923983794527607
  Min value:  -2.1781197603632063
Epoch 1: Train mse = 0.8519 | Val mse = 0.4163
Epoch 2: Train mse = 0.5989 | Val mse = 0.3806

Parameters used in the single-fit model:
input_dim: 1
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00100000
epochs: 2
batch_size: 16
device: mps
verbose: True
no_tasks: 1
pred_len: 60
l2_weight: 0.00001000
hidden_dim: 16
patience: 5
min_epochs: 10
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.40419827
1 day(s) RMSE                      : 8.73950258
1 day(s) R2                        : -0.02255430
1 day(s) Pearson r                 : 0.47385780
1 day(s) QLIKE                     : 0.53376280
3 day(s) MAE                       : 2.49176327
3 day(s) RMSE                      : 8.99793337
3 day(s) R2                        : -0.02448651
3 day(s) Pearson r                 : 0.06239985
3 day(s) QLIKE                     : 0.55133113
5 day(s) MAE                       : 2.50199609
5 day(s) RMSE                      : 9.06711009
5 day(s) R2                        : -0.02882063
5 day(s) Pearson r                 : 0.00954652
5 day(s) QLIKE                     : 0.55731272
10 day(s) MAE                      : 2.52208578
10 day(s) RMSE                     : 9.15150782
10 day(s) R2                       : -0.03944146
10 day(s) Pearson r                : 0.00009477
10 day(s) QLIKE                    : 0.56692869
20 day(s) MAE                      : 2.57378236
20 day(s) RMSE                     : 9.22357582
20 day(s) R2                       : -0.05138572
20 day(s) Pearson r                : -0.00331941
20 day(s) QLIKE                    : 0.57098214
full horizon MAE                   : 2.71853261
full horizon RMSE                  : 9.36254621
full horizon R2                    : -0.06982518
full horizon Pearson r             : -0.00747394
full horizon QLIKE                 : 0.57279256

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_SLSTM_model_1_task.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.35129, max=2.36078

Test on set 5¶

In [ ]:
import os

# Destination file for the set-5 EURUSD LSTM model.
save_test_LSTM_model_EURUSD_5_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_LSTM_model_5_task.pkl"
)

# 20-day-horizon EURUSD run (set 5), log-MSE target with a baseline feature
# window; y is standard-scaled and time features are normalized as well.
t5_EURUSD_LSTM_results, t5_EURUSD_LSTM_nested_results, t5_EURUSD_LSTM_best_model, t5_EURUSD_LSTM_best_params, yy = train_and_evaluate_model(
    model_type="Simple_LSTM",
    X_price=eur7_X_price,
    X_time=eur7_X_time,
    y=eur7_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    save_model_path=save_test_LSTM_model_EURUSD_5_file_path,
    lr=5e-4,
    epochs=50,
    batch_size=16,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-5,
    patience=10,
    min_epochs=30,
    hidden_dim=32,
    hidden_layers=2,
    min_delta=1e-4,
    single_holdout=False,

    # Baseline settings for the log-MSE target; presumably feature index 0
    # over a 20-step window — verify against train_and_evaluate_model.
    target_mode="log_mse",
    baseline_feature_idx=0,        
    baseline_window=20
)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.40793072075585246
  Min value:  0.00022460106458896233
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.1249999999999998
  Min value:  0.0
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4442211143925813
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4999999999999996
  Min value:  0.0
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755

Size of the X data for training (core): (2723, 60, 1)
Size of the Y data for training (core): (2723, 20, 1)
Epoch 1: Train mse = 0.9332 | Val mse = 1.1903
Epoch 2: Train mse = 0.8735 | Val mse = 1.3563
Epoch 3: Train mse = 0.7191 | Val mse = 1.0511
Epoch 4: Train mse = 0.5432 | Val mse = 1.2263
Epoch 5: Train mse = 0.4570 | Val mse = 1.3830
Epoch 6: Train mse = 0.4258 | Val mse = 1.4828
Epoch 7: Train mse = 0.4119 | Val mse = 1.6086
Epoch 8: Train mse = 0.4015 | Val mse = 1.5344
Epoch 9: Train mse = 0.3932 | Val mse = 1.6665
Epoch 10: Train mse = 0.3917 | Val mse = 1.9910
Epoch 11: Train mse = 0.3870 | Val mse = 1.6116
Epoch 12: Train mse = 0.3821 | Val mse = 1.7916
Epoch 13: Train mse = 0.3805 | Val mse = 2.0449
Epoch 14: Train mse = 0.3766 | Val mse = 1.6966
Epoch 15: Train mse = 0.3740 | Val mse = 1.9325
Epoch 16: Train mse = 0.3727 | Val mse = 1.8227
Epoch 17: Train mse = 0.3685 | Val mse = 1.8514
Epoch 18: Train mse = 0.3636 | Val mse = 1.8435
Epoch 19: Train mse = 0.3411 | Val mse = 1.0935
Epoch 20: Train mse = 0.2731 | Val mse = 0.8778
Epoch 21: Train mse = 0.2561 | Val mse = 0.8084
Epoch 22: Train mse = 0.2479 | Val mse = 0.9703
Epoch 23: Train mse = 0.2434 | Val mse = 0.6779
Epoch 24: Train mse = 0.2389 | Val mse = 0.7215
Epoch 25: Train mse = 0.2356 | Val mse = 1.0828
Epoch 26: Train mse = 0.2339 | Val mse = 0.9044
Epoch 27: Train mse = 0.2324 | Val mse = 0.6572
Epoch 28: Train mse = 0.2305 | Val mse = 0.8841
Epoch 29: Train mse = 0.2290 | Val mse = 0.7275
Epoch 30: Train mse = 0.2284 | Val mse = 0.8457
Epoch 31: Train mse = 0.2251 | Val mse = 0.6612
Epoch 32: Train mse = 0.2253 | Val mse = 0.6714
Epoch 33: Train mse = 0.2233 | Val mse = 0.5662
Epoch 34: Train mse = 0.2214 | Val mse = 0.7252
Epoch 35: Train mse = 0.2206 | Val mse = 0.7690
Epoch 36: Train mse = 0.2196 | Val mse = 0.8389
Epoch 37: Train mse = 0.2162 | Val mse = 0.8236
Epoch 38: Train mse = 0.2181 | Val mse = 0.8312
Epoch 39: Train mse = 0.2138 | Val mse = 0.7495
Epoch 40: Train mse = 0.2109 | Val mse = 0.8183
Epoch 41: Train mse = 0.2100 | Val mse = 0.8247
Epoch 42: Train mse = 0.2097 | Val mse = 1.0023
Epoch 43: Train mse = 0.2089 | Val mse = 1.0293
Early stopping triggered at epoch 43.

Parameters used in the single-fit model:
input_dim: 1
output_dim: 1
hidden_layers: 2
dropout: 0.00000000
lr: 0.00050000
epochs: 50
batch_size: 16
device: mps
verbose: True
no_tasks: 1
pred_len: 20
l2_weight: 0.00001000
hidden_dim: 32
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.10193652
1 day(s) RMSE                      : 0.18510868
1 day(s) R2                        : 0.24928355
1 day(s) Pearson r                 : 0.50078110
1 day(s) QLIKE                     : 0.40834671
3 day(s) MAE                       : 0.10287788
3 day(s) RMSE                      : 0.18729410
3 day(s) R2                        : 0.22552363
3 day(s) Pearson r                 : 0.47827404
3 day(s) QLIKE                     : 0.46187840
5 day(s) MAE                       : 0.10268911
5 day(s) RMSE                      : 0.18814331
5 day(s) R2                        : 0.20960387
5 day(s) Pearson r                 : 0.46310211
5 day(s) QLIKE                     : 0.47671554
10 day(s) MAE                      : 0.10412948
10 day(s) RMSE                     : 0.19223949
10 day(s) R2                       : 0.16693590
10 day(s) Pearson r                : 0.41922865
10 day(s) QLIKE                    : 0.54585040
20 day(s) MAE                      : 0.10518209
20 day(s) RMSE                     : 0.19491218
20 day(s) R2                       : 0.13351054
20 day(s) Pearson r                : 0.38140265
20 day(s) QLIKE                    : 0.62345596
full horizon MAE                   : 0.10518209
full horizon RMSE                  : 0.19491218
full horizon R2                    : 0.13351054
full horizon Pearson r             : 0.38140265
full horizon QLIKE                 : 0.62345596

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00676322, max=0.993659

Test on 9¶

In [ ]:
import os

# Destination for the trained 9-feature EURUSD LSTM model.
save_test_LSTM_model_EURUSD_9_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_LSTM_model_9_task.pkl"
)

# Train a Simple_LSTM on the eur9 dataset (all tasks), single train/test split.
t9_EURUSD_LSTM_results, t9_EURUSD_LSTM_nested_results, t9_EURUSD_LSTM_best_model, t9_EURUSD_LSTM_best_params, yy = train_and_evaluate_model(
    model_type="Simple_LSTM",
    X_price=eur9_X_price,
    X_time=eur9_X_time,
    y=eur9_y,
    no_tasks=eur9_y.shape[2],
    use_nested_cv=False,
    flatten=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    y_scale_type="standard",
    # BUGFIX: previously passed save_test_LSTM_model_EURUSD_5_file_path — a stale
    # variable leaked from another cell — so this 9-task run overwrote the
    # 5-task pickle (see the "saved to ..._5_task.pkl" output below).
    save_model_path=save_test_LSTM_model_EURUSD_9_file_path,
    lr=5e-4,
    epochs=50,
    batch_size=16,
    verbose=True,
    time_horizon=20,
    dropout=0.0,
    l2_weight=1e-5,
    patience=10,
    min_epochs=30,
    hidden_dim=32,
    hidden_layers=2,
    min_delta=1e-4,
    single_holdout=False,

    target_mode="log_mse",
    baseline_feature_idx=0,   # feature column used by the naive baseline
    baseline_window=20        # rolling window length of the baseline
)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -1.1318676431757393
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2941918291474397
  Min value:  -3.7200073694899167
Checking X_price_val:
Shape: (302, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  64331685768.90831
  Min value:  -1.0471963706385605
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.321220211077452
  Min value:  -2.97689474283611
Checking X_price_test:
Shape: (757, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -0.8511164778727046
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737455
  Min value:  -5.35233858226595
Epoch 1: Train mse = 0.6468 | Val mse = 0.5773
Epoch 2: Train mse = 0.4624 | Val mse = 0.6031
Epoch 3: Train mse = 0.4480 | Val mse = 0.6352
Epoch 4: Train mse = 0.4230 | Val mse = 0.6680
Epoch 5: Train mse = 0.3322 | Val mse = 0.7117
Epoch 6: Train mse = 0.2521 | Val mse = 0.6238
Epoch 7: Train mse = 0.2214 | Val mse = 0.5920
Epoch 8: Train mse = 0.2054 | Val mse = 0.5598
Epoch 9: Train mse = 0.1655 | Val mse = 0.4524
Epoch 10: Train mse = 0.1461 | Val mse = 0.4780
Epoch 11: Train mse = 0.1374 | Val mse = 0.4302
Epoch 12: Train mse = 0.1318 | Val mse = 0.3833
Epoch 13: Train mse = 0.1272 | Val mse = 0.4426
Epoch 14: Train mse = 0.1247 | Val mse = 0.3116
Epoch 15: Train mse = 0.1227 | Val mse = 0.3783
Epoch 16: Train mse = 0.1225 | Val mse = 0.3825
Epoch 17: Train mse = 0.1207 | Val mse = 0.4912
Epoch 18: Train mse = 0.1188 | Val mse = 0.3433
Epoch 19: Train mse = 0.1191 | Val mse = 0.3788
Epoch 20: Train mse = 0.1177 | Val mse = 0.3572
Epoch 21: Train mse = 0.1175 | Val mse = 0.3651
Epoch 22: Train mse = 0.1163 | Val mse = 0.3483
Epoch 23: Train mse = 0.1153 | Val mse = 0.4938
Epoch 24: Train mse = 0.1156 | Val mse = 0.4594
Epoch 25: Train mse = 0.1148 | Val mse = 0.4525
Epoch 26: Train mse = 0.1138 | Val mse = 0.3970
Epoch 27: Train mse = 0.1129 | Val mse = 0.3589
Epoch 28: Train mse = 0.1125 | Val mse = 0.4284
Epoch 29: Train mse = 0.1120 | Val mse = 0.4249
Epoch 30: Train mse = 0.1115 | Val mse = 0.4223
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 2
output_dim: 2
hidden_layers: 2
dropout: 0.00000000
lr: 0.00050000
epochs: 50
batch_size: 16
device: mps
verbose: True
no_tasks: 2
pred_len: 20
l2_weight: 0.00001000
hidden_dim: 32
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.09374090
1 day(s) RMSE                      : 0.18784829
1 day(s) R2                        : 0.22689790
1 day(s) Pearson r                 : 0.50586513
1 day(s) QLIKE                     : 0.52450778
3 day(s) MAE                       : 0.09600714
3 day(s) RMSE                      : 0.18754187
3 day(s) R2                        : 0.22347317
3 day(s) Pearson r                 : 0.49688448
3 day(s) QLIKE                     : 0.49576478
5 day(s) MAE                       : 0.09612463
5 day(s) RMSE                      : 0.18644333
5 day(s) R2                        : 0.22382268
5 day(s) Pearson r                 : 0.49338072
5 day(s) QLIKE                     : 0.48746374
10 day(s) MAE                      : 0.09965360
10 day(s) RMSE                     : 0.19063069
10 day(s) R2                       : 0.18082099
10 day(s) Pearson r                : 0.45304345
10 day(s) QLIKE                    : 0.51572708
20 day(s) MAE                      : 0.10650661
20 day(s) RMSE                     : 0.19703115
20 day(s) R2                       : 0.11456821
20 day(s) Pearson r                : 0.39918388
20 day(s) QLIKE                    : 0.60811456
full horizon MAE                   : 0.10650661
full horizon RMSE                  : 0.19703115
full horizon R2                    : 0.11456821
full horizon Pearson r             : 0.39918388
full horizon QLIKE                 : 0.60811456

--- Task 2 ---
1 day(s) MAE                       : 0.04615278
1 day(s) RMSE                      : 0.06330739
1 day(s) R2                        : 0.43849963
1 day(s) Pearson r                 : 0.81162780
1 day(s) QLIKE                     : 0.03394786
3 day(s) MAE                       : 0.04945837
3 day(s) RMSE                      : 0.06864449
3 day(s) R2                        : 0.33814153
3 day(s) Pearson r                 : 0.80222461
3 day(s) QLIKE                     : 0.03652340
5 day(s) MAE                       : 0.05072982
5 day(s) RMSE                      : 0.07041227
5 day(s) R2                        : 0.30520816
5 day(s) Pearson r                 : 0.80100016
5 day(s) QLIKE                     : 0.03782397
10 day(s) MAE                      : 0.05212794
10 day(s) RMSE                     : 0.07247720
10 day(s) R2                       : 0.26692851
10 day(s) Pearson r                : 0.79293533
10 day(s) QLIKE                    : 0.03998561
20 day(s) MAE                      : 0.05433336
20 day(s) RMSE                     : 0.07680407
20 day(s) R2                       : 0.16330013
20 day(s) Pearson r                : 0.76894469
20 day(s) QLIKE                    : 0.04424315
full horizon MAE                   : 0.05433336
full horizon RMSE                  : 0.07680407
full horizon R2                    : 0.16330013
full horizon Pearson r             : 0.76894469
full horizon QLIKE                 : 0.04424315

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_LSTM_model_5_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00451238, max=1.36377

Y test¶

In [101]:
# Sanity-check the first forecast step (t=0): compare the spread and range of
# the saved targets against the model predictions.
y_true_t0 = yy[:, 0, 0]
y_pred_t0 = yy[:, 0, 1]

for label, series in (("y_true", y_true_t0), ("y_pred", y_pred_t0)):
    print(f"{label} @t=0  std={np.nanstd(series):.6g}, max={np.nanmax(series):.6g}, min={np.nanmin(series):.6g}")
y_true @t=0  std=0.213643, max=2.66457, min=9.56189e-05
y_pred @t=0  std=0.149159, max=1.13892, min=0.00509328

Test 1 task with Vanilla LSTM with CV¶

In [ ]:
import os

# Checkpoint file so an interrupted nested-CV run can be resumed.
save_checkpoint_SLSTM_model_1CV_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "checkpoint_SLSTM_model_1CV_tasks.pkl"
)

# Final model destination for the 1-task nested-CV run.
save_test_SLSTM_model_1CV_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_SLSTM_model_1CV_tasks.pkl"
)

# Grid searched inside the inner CV folds; values here override the
# same-named keyword arguments passed directly below.
param_grid_test_SLSTM_1CV_aapl_cv = {
    "dropout": [0.2],
    "lr": [3e-5],
    "batch_size": [128, 256],
    "epochs": [3],
    "resume": [True],
    "verbose": [True]
}

t1CV_SLSTM_results, t1CV_SLSTM_nested_results, t1CV_SLSTM_best_model, t1CV_SLSTM_best_params = train_and_evaluate_model(
    model_type="Simple_LSTM",
    X_price=aapl_X_price,
    X_time=aapl_X_time,
    y=aapl_y,
    no_tasks=1,
    use_nested_cv=True,
    outer_folds=2,
    inner_folds=2,
    flatten=False,
    merge_price_time=True,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test_SLSTM_model_1CV_file_path,
    lr=1e-5,
    epochs=150,
    batch_size=64,
    verbose=True,
    hidden_layers=24,
    param_grid=param_grid_test_SLSTM_1CV_aapl_cv,
    checkpoint_path=save_checkpoint_SLSTM_model_1CV_file_path
)

All tasks EURUSD task with Vanilla LSTM without CV¶

In [ ]:
import os

# Destination for the all-tasks EURUSD Simple_LSTM model.
save_test_EURUSD_SLSTM_model_9_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_SLSTM_model_9_task.pkl"
)

# Run configuration kept in one place so the call below stays readable.
slstm_all_tasks_cfg = {
    "model_type": "Simple_LSTM",
    "X_price": eur2_X_price,
    "X_time": eur2_X_time,
    "y": eur2_y,
    "no_tasks": eur2_y.shape[2],
    "use_nested_cv": False,
    "flatten": False,
    "merge_price_time": True,
    "normalize_X": True,
    "normalize_Time": False,
    "normalize_y": True,
    "save_model_path": save_test_EURUSD_SLSTM_model_9_file_path,
    "lr": 1e-5,
    "epochs": 2,
    "batch_size": 32,
    "verbose": True,
    "hidden_layers": 6,
    "time_horizon": 3,
}

(t9_EURUSD_SLSTM_results,
 t9_EURUSD_SLSTM_nested_results,
 t9_EURUSD_SLSTM_best_model,
 t9_EURUSD_SLSTM_best_params) = train_and_evaluate_model(**slstm_all_tasks_cfg)

OHO with parameter search - Vanilla LSTM - 1 task - data 3 - horizon 1¶

In [186]:
import os

# Destination for the best single-holdout OHO LSTM model.
save_test_OHO_SLSTM_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_LSTM_model.pkl"
)

# Only hidden_layers is searched here; re-enable other entries to widen the grid.
param_grid = {
    #"lr": [5e-5, 5e-4],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    #"batch_size": [256, 512],
    "hidden_layers": [2, 3],
    #"hidden_dim": [64, 256]
}

t1d3_OHO_SLSTM_results, t1d3_OHO_SLSTM_nested_results, t1d3_OHO_SLSTM_best_model, t1d3_OHO_SLSTM_best_params, t1d3_OHO_SLSTM_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_LSTM",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,

    # specify these for each data
    no_tasks=1,
    flatten=False,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    epochs=2,
    min_epochs=2,

    # BUGFIX: previously passed save_test_OHO_SMLP_model_file_path, a stale MLP
    # path variable from another cell (the output below shows the model was
    # saved to test_OHO_SMLP_model.pkl). Use the path defined in this cell.
    save_model_path=save_test_OHO_SLSTM_model_file_path,

    # these go into the parameter grid
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,

    hidden_layers=2,
    hidden_dim=16
)
model parameter(s) from the grid will overwrite any overlapping parameters provided directly to this function
Using device: mps
Batch size for y: 3782
Time steps for y: 28
Features for y: 1

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
Using device: mps
Epoch 1: Train loss = 0.2067, Val loss = 0.2756
Epoch 2: Train loss = 0.2040, Val loss = 0.2726
Using device: mps
Epoch 1: Train loss = 0.4229, Val loss = 0.5290
Epoch 2: Train loss = 0.4174, Val loss = 0.5233

Best parameters found in single holdout:
  hidden_layers: 2

Results for the best model from sigle hold out evaluation:

--- Task 1 ---
1 day(s) MAE (log-var)             : 3.60940475
1 day(s) RMSE (log-var)            : 3.83397606
1 day(s) R2 (log-var)              : -7.39862711
1 day(s) MAE (var)                 : 0.18918367
1 day(s) RMSE (var)                : 0.28535484
1 day(s) R2 (var)                  : -0.78400614
1 day(s) QLIKE (var)               : 0.58450556
5 day(s) MAE (log-var)             : 3.70740767
5 day(s) RMSE (log-var)            : 3.92831159
5 day(s) R2 (log-var)              : -7.85309290
5 day(s) MAE (var)                 : 0.18821502
5 day(s) RMSE (var)                : 0.28320721
5 day(s) R2 (var)                  : -0.79093344
5 day(s) QLIKE (var)               : 0.58287868
10 day(s) MAE (log-var)            : 3.71086702
10 day(s) RMSE (log-var)           : 3.93084647
10 day(s) R2 (log-var)             : -7.88733691
10 day(s) MAE (var)                : 0.18771758
10 day(s) RMSE (var)               : 0.28212613
10 day(s) R2 (var)                 : -0.79425193
10 day(s) QLIKE (var)              : 0.57899115
20 day(s) MAE (log-var)            : 3.70735928
20 day(s) RMSE (log-var)           : 3.92562809
20 day(s) R2 (log-var)             : -7.92870530
20 day(s) MAE (var)                : 0.18704653
20 day(s) RMSE (var)               : 0.28075910
20 day(s) R2 (var)                 : -0.79786326
20 day(s) QLIKE (var)              : 0.57321106
full horizon MAE (log-var)         : 3.70624502
full horizon RMSE (log-var)        : 3.92372897
full horizon R2 (log-var)          : -7.95094110
full horizon MAE (var)             : 0.18670276
full horizon RMSE (var)            : 0.28010806
full horizon R2 (var)              : -0.79923600
full horizon QLIKE (var)           : 0.57077142

Best single-holdout model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_OHO_SMLP_model.pkl

KAN models¶

Install TKAN¶

In [ ]:
!pip install tkan

Vanilla KAN¶

Vanilla KAN architecture¶

In [123]:
import torch
import torch.nn as nn
from efficient_kan import kan

class SimpleKAN(nn.Module):
    """Feed-forward KAN: input KANLinear -> `hidden_layers` hidden KANLinear
    layers -> output KANLinear, with dropout after every layer but the last.

    Parameters
    ----------
    input_dim : int
        Width of the (flattened) input vector.
    output_dim : int
        Width of the output vector (all tasks concatenated).
    no_tasks : int
        Number of tasks; stored for downstream slicing of the output.
    hidden_layers : int
        Number of hidden KANLinear blocks between input and output layers.
    dropout : float
        Dropout probability applied after the input and each hidden layer.
    hidden_dim : int
        Width of the hidden layers.
    knots : int
        Spline grid size passed to KANLinear as ``grid_size``.
    spline_power : int
        Spline order passed to KANLinear as ``spline_order``.
    """

    def __init__(self, input_dim, output_dim, no_tasks, hidden_layers = 2, dropout=0.0, hidden_dim = 64, knots=8, spline_power=3,):
        super(SimpleKAN, self).__init__()
        self.no_tasks = no_tasks
        self.hidden_dim = hidden_dim
        self.hidden_layers = hidden_layers
        self.dropout = dropout
        self.knots = knots
        self.spline_power = spline_power
        self.input_dim = input_dim
        self.output_dim = output_dim

        # Build input projection, hidden stack, and output head from one shared
        # KANLinear factory (previously three copy-pasted constructor literals).
        layers = [self._kan_layer(self.input_dim, self.hidden_dim), nn.Dropout(self.dropout)]
        #layers.append(nn.LayerNorm(hidden_dim))  # optional normalization, kept disabled

        for _ in range(self.hidden_layers):
            layers.append(self._kan_layer(self.hidden_dim, self.hidden_dim))
            layers.append(nn.Dropout(self.dropout))

        layers.append(self._kan_layer(self.hidden_dim, self.output_dim))

        self.model = nn.Sequential(*layers)

    def _kan_layer(self, in_features, out_features):
        """One KANLinear layer with this model's shared spline configuration."""
        return kan.KANLinear(
            in_features=in_features,
            out_features=out_features,
            grid_size=self.knots,
            spline_order=self.spline_power,
            scale_noise=0.1,
            scale_base=1.0,
            scale_spline=1.0,
            #enable_standalone_scale_spline=True,
            base_activation=torch.nn.SiLU,
            grid_eps=0.02,
            grid_range=[-1, 1]
        )

    def forward(self, x):
        """Run the full KAN stack on a batch of flattened inputs."""
        return self.model(x)

Vanilla KAN Wrapper¶

In [124]:
from sklearn.base import BaseEstimator, RegressorMixin
import numpy as np
import torch
import torch.nn as nn


class SimpleKANWrapper(BaseEstimator, RegressorMixin):
    """sklearn-style estimator wrapping SimpleKAN.

    Trains with AdamW (biases and normalization parameters excluded from weight
    decay) and early stopping on a held-out validation set. ``loss_type``
    selects the objective:

    - ``"mse"``: plain mean squared error.
    - ``"gauss_nll_var"``: Gaussian NLL where the network output is treated as
      a log-variance.
    - ``"student_t_nll_var"``: Student-t NLL with ``student_df`` degrees of
      freedom; the network output is again treated as a log-variance.

    When ``target_is_logvar`` is True, targets are exponentiated back to
    variances before entering the NLL terms.
    """

    def __init__(
        self,
        input_dim=10,
        output_dim=1,
        dropout=0.1,
        lr=1e-3,
        epochs=100,
        batch_size=512,
        device=None,           # None -> auto-select mps / cuda / cpu below
        verbose=True,
        hidden_layers=6,
        no_tasks=1,
        knots=12,              # KAN spline grid size
        spline_power=3,        # KAN spline order
        l2_weight=1e-4,        # AdamW weight decay (decayed group only)
        hidden_dim=128,
        patience=10,           # epochs without val improvement before stopping
        min_epochs=50,         # never stop earlier than this many epochs
        min_delta=1e-4,        # minimum val-loss improvement that counts

        loss_type="gauss_nll_var",
        target_is_logvar=True,
        nll_eps=1e-12,         # floor for target variances in the NLL losses
        clamp_logvar_min=-20.0,
        clamp_logvar_max=20.0,
        student_df=5.0,        # degrees of freedom for the Student-t loss
    ):
        self.input_dim = int(input_dim)
        self.output_dim = int(output_dim)
        self.dropout = float(dropout)
        self.lr = float(lr)
        self.epochs = int(epochs)
        self.batch_size = int(batch_size)
        # Device preference: Apple MPS, then CUDA, then CPU.
        self.device = device or ("mps" if torch.backends.mps.is_available()
                                 else ("cuda" if torch.cuda.is_available() else "cpu"))
        self.verbose = bool(verbose)
        self.hidden_layers = int(hidden_layers)
        self.no_tasks = int(no_tasks)
        self.knots = int(knots)
        self.spline_power = int(spline_power)
        self.l2_weight = float(l2_weight)
        self.hidden_dim = int(hidden_dim)
        self.patience = int(patience)
        self.min_epochs = int(min_epochs)
        self.min_delta = float(min_delta)


        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)

        # NOTE: building in __init__ deviates from strict sklearn convention
        # (estimators usually build lazily in fit), but set_params rebuilds
        # when architecture parameters change, so grid search still works.
        self._build()

        if self.verbose:
            print(f"Using device: {self.device}")

    def _build(self):
        """(Re)create the SimpleKAN model and its AdamW optimizer."""
        self.model = SimpleKAN(
            self.input_dim,
            no_tasks=self.no_tasks,
            output_dim=self.output_dim,
            hidden_layers=self.hidden_layers,
            dropout=self.dropout,
            knots=self.knots,
            spline_power=self.spline_power,
            hidden_dim=self.hidden_dim
        ).to(self.device)

        # Exclude biases and normalization weights from weight decay
        # (standard AdamW practice).
        decay, no_decay = [], []
        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                continue
            name_l = name.lower()
            if name_l.endswith("bias") or "norm" in name_l:
                no_decay.append(p)
            else:
                decay.append(p)

        # Group 0 = decayed params, group 1 = non-decayed (set_params relies on
        # this ordering when refreshing weight_decay without a rebuild).
        self.optimizer = torch.optim.AdamW(
            [{"params": decay, "weight_decay": self.l2_weight},
             {"params": no_decay, "weight_decay": 0.0}],
            lr=self.lr
        )
        self._mse = nn.MSELoss()

    def _compute_loss(self, y_hat, y_true):
        """Return the scalar batch loss according to ``self.loss_type``."""
        if self.loss_type == "mse":
            return self._mse(y_hat, y_true)

        # NLL path: predictions are log-variances; clamp so exp() stays finite.
        z = torch.clamp(y_hat, self.clamp_logvar_min, self.clamp_logvar_max)


        # Recover target variances (targets may be stored as log-variances).
        if self.target_is_logvar:
            v = torch.exp(y_true)
        else:
            v = y_true
        v = torch.clamp(v, min=self.nll_eps)  # guard against non-positive variances

        if self.loss_type == "gauss_nll_var":
            # Gaussian NLL up to constants: v/sigma^2 + log(sigma^2).
            loss = v * torch.exp(-z) + z
            return loss.mean()

        if self.loss_type == "student_t_nll_var":
            nu = torch.tensor(self.student_df, device=z.device, dtype=z.dtype)
            loss = 0.5 * (nu + 1.0) * torch.log1p(v / (nu * torch.exp(z))) + 0.5 * z
            return loss.mean()

        raise ValueError(f"Unknown loss_type: {self.loss_type}")

    def fit(self, X, y, X_test, y_test):
        """Train with mini-batch AdamW and early stopping.

        X_test / y_test act as the validation set for early stopping (they are
        renamed X_val / y_val below). Validation loss — and therefore early
        stopping — is computed on the FIRST task's slice of the outputs only.
        The best-validation weights are restored before returning.

        Raises ValueError if y is not 2D, its width does not match output_dim,
        or is not divisible by no_tasks.
        """
        if y.ndim != 2:
            raise ValueError(f"y must be 2D (N, output_dim); got {y.shape}")
        if y.shape[1] != self.output_dim:
            raise ValueError(f"y width ({y.shape[1]}) must match output_dim ({self.output_dim})")
        if self.no_tasks < 1:
            raise ValueError("no_tasks must be >= 1")

        if y.shape[1] % self.no_tasks != 0:
            raise ValueError(f"y.shape[1] ({y.shape[1]}) must be divisible by no_tasks ({self.no_tasks})")
        task_output_dim = y.shape[1] // self.no_tasks  # typically T

        X = torch.tensor(X, dtype=torch.float32, device=self.device)
        y = torch.tensor(y, dtype=torch.float32, device=self.device)
        X_val = torch.tensor(X_test, dtype=torch.float32, device=self.device)
        y_val = torch.tensor(y_test, dtype=torch.float32, device=self.device)

        loader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(X, y),
            batch_size=self.batch_size,
            shuffle=True
        )

        # Early-stopping state: best validation loss, its weights, and the
        # count of consecutive epochs without a min_delta improvement.
        best_val = float("inf")
        best_state = None
        no_imp = 0

        for epoch in range(1, self.epochs + 1):
            self.model.train()
            running = 0.0
            n_batches = 0
            for xb, yb in loader:
                self.optimizer.zero_grad(set_to_none=True)
                preds = self.model(xb)                 # (N, D)
                loss = self._compute_loss(preds, yb)
                loss.backward()
                self.optimizer.step()
                running += float(loss.detach().cpu())
                n_batches += 1
            train_loss = running / max(1, n_batches)

            # Validate on the first task's output columns only.
            self.model.eval()
            with torch.no_grad():
                vpreds = self.model(X_val)            # (N, D)
                vpreds_task0 = vpreds[:, :task_output_dim]
                y_val_task0 = y_val[:, :task_output_dim]
                vloss = float(self._compute_loss(vpreds_task0, y_val_task0).detach().cpu())

            if self.verbose:
                print(f"Epoch {epoch}: Train {self.loss_type} = {train_loss:.4f} | Val {self.loss_type} = {vloss:.4f}")

            if (best_val - vloss) > self.min_delta:
                best_val = vloss
                no_imp = 0
                # Snapshot best weights on CPU so they survive further training.
                best_state = {k: v.detach().cpu() for k, v in self.model.state_dict().items()}
            else:
                no_imp += 1

            # Patience only counts once min_epochs have elapsed.
            if no_imp >= self.patience and epoch >= self.min_epochs:
                if self.verbose:
                    print(f"Early stopping triggered at epoch {epoch}.")
                break

        # Restore the best-validation weights (if any improvement was seen).
        if best_state is not None:
            self.model.load_state_dict(best_state)

        return self

    def predict(self, X):
        """Predict in eval mode; returns a numpy array of shape (N, output_dim)."""
        self.model.eval()
        X = torch.tensor(X, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            preds = self.model(X)
        return preds.detach().cpu().numpy()

    def get_params(self, deep=True):
        """Return constructor parameters (sklearn clone / grid-search support)."""
        return {
            "input_dim": self.input_dim,
            "output_dim": self.output_dim,
            "hidden_layers": self.hidden_layers,
            "dropout": self.dropout,
            "lr": self.lr,
            "epochs": self.epochs,
            "batch_size": self.batch_size,
            "device": self.device,
            "verbose": self.verbose,
            "no_tasks": self.no_tasks,
            "knots": self.knots,
            "spline_power": self.spline_power,
            "l2_weight": self.l2_weight,
            "hidden_dim": self.hidden_dim,
            "patience": self.patience,
            "min_epochs": self.min_epochs,
            "min_delta": self.min_delta,
            "loss_type": self.loss_type,
            "target_is_logvar": self.target_is_logvar,
            "nll_eps": self.nll_eps,
            "clamp_logvar_min": self.clamp_logvar_min,
            "clamp_logvar_max": self.clamp_logvar_max,
            "student_df": self.student_df,
        }

    def set_params(self, **params):
        """Update parameters; rebuild the model only when architecture changes.

        For non-architecture updates, refresh the optimizer's learning rate and
        the decayed group's weight_decay in place (param_groups[0] is the
        decayed group created in _build).
        """
        arch_keys = {
            "input_dim", "output_dim", "hidden_layers", "hidden_dim",
            "dropout", "no_tasks", "knots", "spline_power", "device"
        }
        need_rebuild = any(k in arch_keys for k in params)

        for k, v in params.items():
            if hasattr(self, k):
                setattr(self, k, v)

        if need_rebuild:
            self._build()
        else:
            for g in self.optimizer.param_groups:
                g["lr"] = self.lr
            if len(self.optimizer.param_groups) > 0:
                self.optimizer.param_groups[0]["weight_decay"] = self.l2_weight

        return self

Tasks 1 EURUSD with Vanilla KAN without CV¶

In [788]:
import os

# Destination for the trained single-task EURUSD KAN model.
save_test_EURUSD_SKAN_model_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_SKAN_model_1_task.pkl"
)

# NOTE(review): min_epochs (90) exceeds epochs (50), so early stopping can
# never trigger and training always runs the full 50 epochs — confirm intended.
skan_run_cfg = {
    "model_type": "Simple_KAN",
    "X_price": eur2_X_price,
    "X_time": eur2_X_time,
    "y": eur2_y,
    "no_tasks": 1,
    "use_nested_cv": False,
    "flatten": True,
    "merge_price_time": False,
    "normalize_X": True,
    "normalize_Time": True,
    "normalize_y": True,
    "save_model_path": save_test_EURUSD_SKAN_model_1_file_path,
    "lr": 1e-3,
    "epochs": 50,
    "batch_size": 512,
    "verbose": True,
    "time_horizon": 28,
    "dropout": 0,
    "l2_weight": 1e-5,
    "patience": 10,
    "min_epochs": 90,
    "hidden_dim": 128,
    "hidden_layers": 3,
}

(t1_EURUSD_SKAN_results,
 t1_EURUSD_SKAN_nested_results,
 t1_EURUSD_SKAN_best_model,
 t1_EURUSD_SKAN_best_params,
 _) = train_and_evaluate_model(**skan_run_cfg)
[mode=log_var_ratio] loss_type=mse, target_is_logvar=False, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 28
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -3.1812809750988515
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_var_ratio scaled):
Shape: (2723, 28, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.6528709038839593
  Min value:  -4.601403791417997
Checking X_price_val:
Shape: (302, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.40892116517157
  Min value:  -2.633090998920803
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_var_ratio scaled):
Shape: (302, 28, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.4683943468351024
  Min value:  -4.156376337982124
Checking X_price_test:
Shape: (757, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -2.633090998920803
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_var_ratio scaled):
Shape: (757, 28, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.7701377027111294
  Min value:  -4.754042124912198
Epoch 1: Train mse = 0.9924 | Val mse = 1.0072
Epoch 2: Train mse = 0.9372 | Val mse = 0.8234
Epoch 3: Train mse = 0.8682 | Val mse = 0.7570
Epoch 4: Train mse = 0.8149 | Val mse = 0.7719
Epoch 5: Train mse = 0.7645 | Val mse = 0.7058
Epoch 6: Train mse = 0.7128 | Val mse = 0.6655
Epoch 7: Train mse = 0.6795 | Val mse = 0.6905
Epoch 8: Train mse = 0.6496 | Val mse = 0.7072
Epoch 9: Train mse = 0.6231 | Val mse = 0.6724
Epoch 10: Train mse = 0.5999 | Val mse = 0.6731
Epoch 11: Train mse = 0.5658 | Val mse = 0.6215
Epoch 12: Train mse = 0.5430 | Val mse = 0.6307
Epoch 13: Train mse = 0.5225 | Val mse = 0.6007
Epoch 14: Train mse = 0.5026 | Val mse = 0.6094
Epoch 15: Train mse = 0.4805 | Val mse = 0.5838
Epoch 16: Train mse = 0.4492 | Val mse = 0.5407
Epoch 17: Train mse = 0.4152 | Val mse = 0.5286
Epoch 18: Train mse = 0.3829 | Val mse = 0.4919
Epoch 19: Train mse = 0.3443 | Val mse = 0.4798
Epoch 20: Train mse = 0.3084 | Val mse = 0.4471
Epoch 21: Train mse = 0.2696 | Val mse = 0.4048
Epoch 22: Train mse = 0.2365 | Val mse = 0.3765
Epoch 23: Train mse = 0.2097 | Val mse = 0.3457
Epoch 24: Train mse = 0.1913 | Val mse = 0.3407
Epoch 25: Train mse = 0.1800 | Val mse = 0.3334
Epoch 26: Train mse = 0.1734 | Val mse = 0.3144
Epoch 27: Train mse = 0.1671 | Val mse = 0.3215
Epoch 28: Train mse = 0.1604 | Val mse = 0.3070
Epoch 29: Train mse = 0.1563 | Val mse = 0.3100
Epoch 30: Train mse = 0.1553 | Val mse = 0.3185
Epoch 31: Train mse = 0.1559 | Val mse = 0.3018
Epoch 32: Train mse = 0.1508 | Val mse = 0.3151
Epoch 33: Train mse = 0.1468 | Val mse = 0.3051
Epoch 34: Train mse = 0.1467 | Val mse = 0.3060
Epoch 35: Train mse = 0.1430 | Val mse = 0.3042
Epoch 36: Train mse = 0.1405 | Val mse = 0.3146
Epoch 37: Train mse = 0.1393 | Val mse = 0.3062
Epoch 38: Train mse = 0.1383 | Val mse = 0.3085
Epoch 39: Train mse = 0.1379 | Val mse = 0.3094
Epoch 40: Train mse = 0.1366 | Val mse = 0.3117
Epoch 41: Train mse = 0.1334 | Val mse = 0.3116
Epoch 42: Train mse = 0.1336 | Val mse = 0.3129
Epoch 43: Train mse = 0.1335 | Val mse = 0.3193
Epoch 44: Train mse = 0.1321 | Val mse = 0.3152
Epoch 45: Train mse = 0.1296 | Val mse = 0.3200
Epoch 46: Train mse = 0.1303 | Val mse = 0.3162
Epoch 47: Train mse = 0.1307 | Val mse = 0.3096
Epoch 48: Train mse = 0.1281 | Val mse = 0.3133
Epoch 49: Train mse = 0.1285 | Val mse = 0.3206
Epoch 50: Train mse = 0.1266 | Val mse = 0.3233

Parameters used in the single-fit model:
input_dim: 360
output_dim: 28
hidden_layers: 3
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 8
spline_power: 3
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 90
min_delta: 0.00010000
loss_type: mse
target_is_logvar: False
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.14432599
1 day(s) RMSE                      : 0.23817538
1 day(s) R2                        : -0.24284247
1 day(s) Pearson r                 : 0.25984631
1 day(s) QLIKE                     : 0.53811900
3 day(s) MAE                       : 0.15167007
3 day(s) RMSE                      : 0.25033049
3 day(s) R2                        : -0.38352644
3 day(s) Pearson r                 : 0.21167605
3 day(s) QLIKE                     : 0.61921456
5 day(s) MAE                       : 0.15322999
5 day(s) RMSE                      : 0.25113809
5 day(s) R2                        : -0.40829107
5 day(s) Pearson r                 : 0.21530852
5 day(s) QLIKE                     : 0.60976263
10 day(s) MAE                      : 0.16571745
10 day(s) RMSE                     : 0.26692486
10 day(s) R2                       : -0.60609502
10 day(s) Pearson r                : 0.19253859
10 day(s) QLIKE                    : 0.65281540
20 day(s) MAE                      : 0.18666096
20 day(s) RMSE                     : 0.30214202
20 day(s) R2                       : -1.08212877
20 day(s) Pearson r                : 0.16535812
20 day(s) QLIKE                    : 0.68918064
full horizon MAE                   : 0.19926700
full horizon RMSE                  : 0.32702853
full horizon R2                    : -1.45247494
full horizon Pearson r             : 0.15008655
full horizon QLIKE                 : 0.70562558

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_SKAN_model_1_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00310523, max=2.49799

Tasks 1 EURUSD 3 with Vanilla KAN without CV¶

In [98]:
import os

# Where the best single-task model checkpoint will be written.
save_test_EURUSD_SKAN_model_1_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SKAN_model_1_task.pkl"
)

# Single-task EURUSD run of the vanilla Simple_KAN on the eur5 dataset,
# trained on log-variance targets (target_mode="log_mse"), plain
# train/val/test split (no nested CV).
(
    t1_EURUSD_SKAN_results,
    t1_EURUSD_SKAN_nested_results,
    t1_EURUSD_SKAN_best_model,
    t1_EURUSD_SKAN_best_params,
    test_y_data,
) = train_and_evaluate_model(
    # model + data
    model_type="Simple_KAN",
    X_price=eur5_X_price,
    X_time=eur5_X_time,
    y=eur5_y,
    no_tasks=1,
    time_horizon=20,
    flatten=True,
    # preprocessing
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    target_mode="log_mse",
    # evaluation protocol
    use_nested_cv=False,
    verbose=True,
    save_model_path=save_test_EURUSD_SKAN_model_1_file_path,
    # optimisation
    lr=5e-3,
    epochs=50,
    batch_size=64,
    dropout=0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=30,
    min_delta=1e-3,
    # architecture
    hidden_layers=2,
    hidden_dim=64,
    knots=8,
    spline_power=5,
)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.40793072075585246
  Min value:  0.00022460106458896233
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4442211143925813
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755

Size of the X data for training (core): (2723, 60)
Size of the Y data for training (core): (2723, 20)
Epoch 1: Train mse = 0.9124 | Val mse = 0.8917
Epoch 2: Train mse = 0.7542 | Val mse = 0.6721
Epoch 3: Train mse = 0.5237 | Val mse = 0.5092
Epoch 4: Train mse = 0.4263 | Val mse = 0.4890
Epoch 5: Train mse = 0.4032 | Val mse = 0.4653
Epoch 6: Train mse = 0.3846 | Val mse = 0.4604
Epoch 7: Train mse = 0.3755 | Val mse = 0.4313
Epoch 8: Train mse = 0.3559 | Val mse = 0.4283
Epoch 9: Train mse = 0.3431 | Val mse = 0.4341
Epoch 10: Train mse = 0.3385 | Val mse = 0.4018
Epoch 11: Train mse = 0.3312 | Val mse = 0.3959
Epoch 12: Train mse = 0.3298 | Val mse = 0.4230
Epoch 13: Train mse = 0.3139 | Val mse = 0.3809
Epoch 14: Train mse = 0.3104 | Val mse = 0.3866
Epoch 15: Train mse = 0.3121 | Val mse = 0.3854
Epoch 16: Train mse = 0.2973 | Val mse = 0.3705
Epoch 17: Train mse = 0.2914 | Val mse = 0.3943
Epoch 18: Train mse = 0.2847 | Val mse = 0.3766
Epoch 19: Train mse = 0.2865 | Val mse = 0.3950
Epoch 20: Train mse = 0.2843 | Val mse = 0.3683
Epoch 21: Train mse = 0.2739 | Val mse = 0.3815
Epoch 22: Train mse = 0.2758 | Val mse = 0.3941
Epoch 23: Train mse = 0.2762 | Val mse = 0.3779
Epoch 24: Train mse = 0.2681 | Val mse = 0.3984
Epoch 25: Train mse = 0.2612 | Val mse = 0.3973
Epoch 26: Train mse = 0.2587 | Val mse = 0.3908
Epoch 27: Train mse = 0.2590 | Val mse = 0.3958
Epoch 28: Train mse = 0.2598 | Val mse = 0.3875
Epoch 29: Train mse = 0.2530 | Val mse = 0.4029
Epoch 30: Train mse = 0.2519 | Val mse = 0.4106
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 2
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 64
device: mps
verbose: True
no_tasks: 1
knots: 8
spline_power: 5
l2_weight: 0.00010000
hidden_dim: 64
patience: 10
min_epochs: 30
min_delta: 0.00100000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08973034
1 day(s) RMSE                      : 0.17806049
1 day(s) R2                        : 0.30536371
1 day(s) Pearson r                 : 0.56662343
1 day(s) QLIKE                     : 0.39630250
3 day(s) MAE                       : 0.09623278
3 day(s) RMSE                      : 0.18561800
3 day(s) R2                        : 0.23932327
3 day(s) Pearson r                 : 0.51445302
3 day(s) QLIKE                     : 0.42344969
5 day(s) MAE                       : 0.09740100
5 day(s) RMSE                      : 0.18788993
5 day(s) R2                        : 0.21173131
5 day(s) Pearson r                 : 0.48806846
5 day(s) QLIKE                     : 0.42115411
10 day(s) MAE                      : 0.09881226
10 day(s) RMSE                     : 0.19051548
10 day(s) R2                       : 0.18181084
10 day(s) Pearson r                : 0.45800072
10 day(s) QLIKE                    : 0.42620625
20 day(s) MAE                      : 0.10040123
20 day(s) RMSE                     : 0.19403940
20 day(s) R2                       : 0.14125309
20 day(s) Pearson r                : 0.42072977
20 day(s) QLIKE                    : 0.45272889
full horizon MAE                   : 0.10040123
full horizon RMSE                  : 0.19403940
full horizon R2                    : 0.14125309
full horizon Pearson r             : 0.42072977
full horizon QLIKE                 : 0.45272889

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00326985, max=1.12003

Data test¶

In [73]:
test_y_data

test_y_data

# Slice out ground truth (channel 0) and prediction (channel 1) at the
# first forecast step for a quick distribution sanity check.
y_true_t0 = test_y_data[:, 0, 0]
y_pred_t0 = test_y_data[:, 0, 1]

for label, series in (("y_true", y_true_t0), ("y_pred", y_pred_t0)):
    print(f"{label} @t=0  std={np.nanstd(series):.6g}, max={np.nanmax(series):.6g}, min={np.nanmin(series):.6g}")
y_true @t=0  std=0.213643, max=2.66457, min=9.56189e-05
y_pred @t=0  std=0.160636, max=2.02879, min=0.00436556

All tasks EURUSD with Vanilla KAN without CV¶

In [ ]:
import os

# Where the all-task model checkpoint will be written.
save_test_EURUSD_SKAN_model_all_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SKAN_model_all_task.pkl"
)

# All-task EURUSD run of the vanilla Simple_KAN on the eur2 dataset;
# one task per target feature, plain train/val/test split (no nested CV).
(
    tall_EURUSD_SKAN_results,
    tall_EURUSD_SKAN_nested_results,
    tall_EURUSD_SKAN_best_model,
    tall_EURUSD_SKAN_best_params,
    _,
) = train_and_evaluate_model(
    # model + data
    model_type="Simple_KAN",
    X_price=eur2_X_price,
    X_time=eur2_X_time,
    y=eur2_y,
    no_tasks=eur2_y.shape[2],  # one task per target feature
    time_horizon=28,
    flatten=True,
    # preprocessing
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    # evaluation protocol
    use_nested_cv=False,
    verbose=True,
    save_model_path=save_test_EURUSD_SKAN_model_all_file_path,
    # optimisation
    lr=1e-3,
    epochs=50,
    batch_size=512,
    dropout=0,
    l2_weight=1e-5,
    patience=10,
    # NOTE(review): min_epochs (90) exceeds epochs (50), so early stopping
    # can never trigger in this run — confirm this is intended.
    min_epochs=90,
    # architecture
    hidden_dim=64,
    hidden_layers=3,
)

All tasks EURUSD 3 with Vanilla KAN without CV¶

In [520]:
import os

# Where the all-task model checkpoint will be written.
save_test_EURUSD_SKAN_model_all_file_path = os.path.join(
    root_folder, objects_relative_path, "test_EURUSD_SKAN_model_all_task.pkl"
)

# All-task EURUSD run of the vanilla Simple_KAN on the eur8 dataset with
# identity targets; one task per target feature, plain split (no nested CV).
(
    tall_EURUSD_SKAN_results,
    tall_EURUSD_SKAN_nested_results,
    tall_EURUSD_SKAN_best_model,
    tall_EURUSD_SKAN_best_params,
    _,
) = train_and_evaluate_model(
    # model + data
    model_type="Simple_KAN",
    X_price=eur8_X_price,
    X_time=eur8_X_time,
    y=eur8_y,
    no_tasks=eur8_y.shape[2],  # one task per target feature
    time_horizon=20,
    flatten=True,
    # preprocessing
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    target_mode="identity",
    # evaluation protocol
    use_nested_cv=False,
    verbose=True,
    save_model_path=save_test_EURUSD_SKAN_model_all_file_path,
    # optimisation
    lr=5e-3,
    epochs=50,
    batch_size=64,
    dropout=0,
    l2_weight=1e-4,
    patience=10,
    min_epochs=30,
    min_delta=1e-3,
    # architecture
    hidden_layers=2,
    hidden_dim=64,
    knots=8,
    spline_power=5,
)
[mode=identity] loss_type=mse, target_is_logvar=False, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 7

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (identity scaled):
Shape: (2723, 20, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.303189333927858
  Min value:  -3.212230144094019
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.40793072075585246
  Min value:  0.00022460106458896233
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (identity scaled):
Shape: (302, 20, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.453687047909759
  Min value:  -2.193417706702724
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4442211143925813
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (identity scaled):
Shape: (757, 20, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.179992427084127
  Min value:  -2.6542578890940707
Epoch 1: Train mse = 0.8935 | Val mse = 1.1678
Epoch 2: Train mse = 0.8216 | Val mse = 1.1368
Epoch 3: Train mse = 0.8091 | Val mse = 1.1400
Epoch 4: Train mse = 0.7965 | Val mse = 1.1597
Epoch 5: Train mse = 0.7815 | Val mse = 1.1431
Epoch 6: Train mse = 0.7814 | Val mse = 1.1339
Epoch 7: Train mse = 0.7695 | Val mse = 1.2004
Epoch 8: Train mse = 0.7811 | Val mse = 1.1461
Epoch 9: Train mse = 0.7851 | Val mse = 1.1259
Epoch 10: Train mse = 0.7701 | Val mse = 1.1520
Epoch 11: Train mse = 0.7738 | Val mse = 1.1583
Epoch 12: Train mse = 0.7617 | Val mse = 1.1233
Epoch 13: Train mse = 0.7534 | Val mse = 1.1241
Epoch 14: Train mse = 0.7534 | Val mse = 1.1023
Epoch 15: Train mse = 0.7634 | Val mse = 1.0819
Epoch 16: Train mse = 0.7331 | Val mse = 1.0504
Epoch 17: Train mse = 0.7584 | Val mse = 1.1469
Epoch 18: Train mse = 0.7363 | Val mse = 1.0622
Epoch 19: Train mse = 0.7214 | Val mse = 1.0880
Epoch 20: Train mse = 0.7080 | Val mse = 1.0477
Epoch 21: Train mse = 0.6964 | Val mse = 1.0666
Epoch 22: Train mse = 0.7031 | Val mse = 1.0770
Epoch 23: Train mse = 0.6799 | Val mse = 1.0654
Epoch 24: Train mse = 0.6784 | Val mse = 1.0236
Epoch 25: Train mse = 0.6815 | Val mse = 1.0134
Epoch 26: Train mse = 0.6785 | Val mse = 1.0288
Epoch 27: Train mse = 0.6578 | Val mse = 1.0382
Epoch 28: Train mse = 0.6452 | Val mse = 1.0027
Epoch 29: Train mse = 0.6562 | Val mse = 1.0303
Epoch 30: Train mse = 0.6433 | Val mse = 1.0589
Epoch 31: Train mse = 0.6293 | Val mse = 1.0603
Epoch 32: Train mse = 0.6224 | Val mse = 1.1030
Epoch 33: Train mse = 0.6246 | Val mse = 1.0724
Epoch 34: Train mse = 0.6223 | Val mse = 1.0778
Epoch 35: Train mse = 0.6039 | Val mse = 1.0897
Epoch 36: Train mse = 0.5968 | Val mse = 1.1096
Epoch 37: Train mse = 0.5991 | Val mse = 1.0801
Epoch 38: Train mse = 0.5883 | Val mse = 1.0752
Early stopping triggered at epoch 38.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 140
hidden_layers: 2
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 64
device: mps
verbose: True
no_tasks: 7
knots: 8
spline_power: 5
l2_weight: 0.00010000
hidden_dim: 64
patience: 10
min_epochs: 30
min_delta: 0.00100000
loss_type: mse
target_is_logvar: False
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11254098
1 day(s) RMSE                      : 0.19165340
1 day(s) R2                        : 0.19526031
1 day(s) Pearson r                 : 0.51116886
1 day(s) QLIKE                     : 0.42413083
3 day(s) MAE                       : 0.12417479
3 day(s) RMSE                      : 0.20267571
3 day(s) R2                        : 0.09309171
3 day(s) Pearson r                 : 0.44920669
3 day(s) QLIKE                     : 0.45873868
5 day(s) MAE                       : 0.12644817
5 day(s) RMSE                      : 0.20509392
5 day(s) R2                        : 0.06076814
5 day(s) Pearson r                 : 0.42823231
5 day(s) QLIKE                     : 0.45942067
10 day(s) MAE                      : 0.12503650
10 day(s) RMSE                     : 0.20685821
10 day(s) R2                       : 0.03541897
10 day(s) Pearson r                : 0.38596066
10 day(s) QLIKE                    : 0.47337746
20 day(s) MAE                      : 0.12285896
20 day(s) RMSE                     : 0.20689343
20 day(s) R2                       : 0.02371019
20 day(s) Pearson r                : 0.35382735
20 day(s) QLIKE                    : 3.40556451
full horizon MAE                   : 0.12285896
full horizon RMSE                  : 0.20689343
full horizon R2                    : 0.02371019
full horizon Pearson r             : 0.35382735
full horizon QLIKE                 : 3.40556451

--- Task 2 ---
1 day(s) MAE                       : 0.05315282
1 day(s) RMSE                      : 0.07595518
1 day(s) R2                        : 0.19173075
1 day(s) Pearson r                 : 0.77066528
1 day(s) QLIKE                     : 0.05236870
3 day(s) MAE                       : 0.05301878
3 day(s) RMSE                      : 0.07587042
3 day(s) R2                        : 0.19146521
3 day(s) Pearson r                 : 0.76953026
3 day(s) QLIKE                     : 0.05235153
5 day(s) MAE                       : 0.05343230
5 day(s) RMSE                      : 0.07645308
5 day(s) R2                        : 0.18087905
5 day(s) Pearson r                 : 0.76383990
5 day(s) QLIKE                     : 0.05331872
10 day(s) MAE                      : 0.05306473
10 day(s) RMSE                     : 0.07677222
10 day(s) R2                       : 0.17747007
10 day(s) Pearson r                : 0.75707023
10 day(s) QLIKE                    : 0.05329293
20 day(s) MAE                      : 0.05500979
20 day(s) RMSE                     : 0.08168575
20 day(s) R2                       : 0.05355829
20 day(s) Pearson r                : 0.74615357
20 day(s) QLIKE                    : 0.05653642
full horizon MAE                   : 0.05500979
full horizon RMSE                  : 0.08168575
full horizon R2                    : 0.05355829
full horizon Pearson r             : 0.74615357
full horizon QLIKE                 : 0.05653642

--- Task 3 ---
1 day(s) MAE                       : 0.03587545
1 day(s) RMSE                      : 0.05230429
1 day(s) R2                        : -0.38877528
1 day(s) Pearson r                 : -0.09050495
1 day(s) QLIKE                     : 10.56899756
3 day(s) MAE                       : 0.03586274
3 day(s) RMSE                      : 0.05203309
3 day(s) R2                        : -0.37406395
3 day(s) Pearson r                 : -0.09311736
3 day(s) QLIKE                     : 10.40308294
5 day(s) MAE                       : 0.03597379
5 day(s) RMSE                      : 0.05208722
5 day(s) R2                        : -0.37599864
5 day(s) Pearson r                 : -0.09817719
5 day(s) QLIKE                     : 10.40219326
10 day(s) MAE                      : 0.03617650
10 day(s) RMSE                     : 0.05224649
10 day(s) R2                       : -0.38254397
10 day(s) Pearson r                : -0.10476950
10 day(s) QLIKE                    : 24.71630485
20 day(s) MAE                      : 0.03619907
20 day(s) RMSE                     : 0.05214390
20 day(s) R2                       : -0.37618254
20 day(s) Pearson r                : -0.09062318
20 day(s) QLIKE                    : 24.04990302
full horizon MAE                   : 0.03619907
full horizon RMSE                  : 0.05214390
full horizon R2                    : -0.37618254
full horizon Pearson r             : -0.09062318
full horizon QLIKE                 : 24.04990302

--- Task 4 ---
1 day(s) MAE                       : 0.27765524
1 day(s) RMSE                      : 0.38218691
1 day(s) R2                        : -0.08138959
1 day(s) Pearson r                 : -0.01599706
1 day(s) QLIKE                     : 3.34360586
3 day(s) MAE                       : 0.28427266
3 day(s) RMSE                      : 0.38173683
3 day(s) R2                        : -0.07873097
3 day(s) Pearson r                 : -0.00282762
3 day(s) QLIKE                     : 3.34474135
5 day(s) MAE                       : 0.28301028
5 day(s) RMSE                      : 0.38484701
5 day(s) R2                        : -0.09612974
5 day(s) Pearson r                 : -0.02413473
5 day(s) QLIKE                     : 3.34639176
10 day(s) MAE                      : 0.28184384
10 day(s) RMSE                     : 0.38573416
10 day(s) R2                       : -0.10062207
10 day(s) Pearson r                : -0.02143539
10 day(s) QLIKE                    : 3.34714894
20 day(s) MAE                      : 0.28193405
20 day(s) RMSE                     : 0.38496422
20 day(s) R2                       : -0.09579254
20 day(s) Pearson r                : -0.01470628
20 day(s) QLIKE                    : 3.34648959
full horizon MAE                   : 0.28193405
full horizon RMSE                  : 0.38496422
full horizon R2                    : -0.09579254
full horizon Pearson r             : -0.01470628
full horizon QLIKE                 : 3.34648959

--- Task 5 ---
1 day(s) MAE                       : 0.04347799
1 day(s) RMSE                      : 0.05762496
1 day(s) R2                        : -0.18857014
1 day(s) Pearson r                 : -0.06318965
1 day(s) QLIKE                     : 23.18812761
3 day(s) MAE                       : 0.04602275
3 day(s) RMSE                      : 0.05879286
3 day(s) R2                        : -0.23705028
3 day(s) Pearson r                 : -0.08333902
3 day(s) QLIKE                     : 23.29301832
5 day(s) MAE                       : 0.04632597
5 day(s) RMSE                      : 0.05938474
5 day(s) R2                        : -0.26174065
5 day(s) Pearson r                 : -0.09853643
5 day(s) QLIKE                     : 22.78345619
10 day(s) MAE                      : 0.04612268
10 day(s) RMSE                     : 0.05952563
10 day(s) R2                       : -0.26693946
10 day(s) Pearson r                : -0.09596024
10 day(s) QLIKE                    : 22.46415206
20 day(s) MAE                      : 0.04644178
20 day(s) RMSE                     : 0.05946367
20 day(s) R2                       : -0.26352270
20 day(s) Pearson r                : -0.09378716
20 day(s) QLIKE                    : 22.51881946
full horizon MAE                   : 0.04644178
full horizon RMSE                  : 0.05946367
full horizon R2                    : -0.26352270
full horizon Pearson r             : -0.09378716
full horizon QLIKE                 : 22.51881946

--- Task 6 ---
1 day(s) MAE                       : 0.01088549
1 day(s) RMSE                      : 0.01361418
1 day(s) R2                        : -0.57315512
1 day(s) Pearson r                 : 0.56862277
1 day(s) QLIKE                     : 14.77846520
3 day(s) MAE                       : 0.01093249
3 day(s) RMSE                      : 0.01366028
3 day(s) R2                        : -0.59907409
3 day(s) Pearson r                 : 0.56393234
3 day(s) QLIKE                     : 14.78101499
5 day(s) MAE                       : 0.01103161
5 day(s) RMSE                      : 0.01377561
5 day(s) R2                        : -0.63785847
5 day(s) Pearson r                 : 0.55943659
5 day(s) QLIKE                     : 14.79182668
10 day(s) MAE                      : 0.01106192
10 day(s) RMSE                     : 0.01384776
10 day(s) R2                       : -0.68891551
10 day(s) Pearson r                : 0.54625433
10 day(s) QLIKE                    : 14.87080609
20 day(s) MAE                      : 0.01114399
20 day(s) RMSE                     : 0.01411499
20 day(s) R2                       : -0.85328251
20 day(s) Pearson r                : 0.51551530
20 day(s) QLIKE                    : 14.97944860
full horizon MAE                   : 0.01114399
full horizon RMSE                  : 0.01411499
full horizon R2                    : -0.85328251
full horizon Pearson r             : 0.51551530
full horizon QLIKE                 : 14.97944860

--- Task 7 ---
1 day(s) MAE                       : 5.54079197
1 day(s) RMSE                      : 10.48595817
1 day(s) R2                        : -70.95962683
1 day(s) Pearson r                 : -0.22251553
1 day(s) QLIKE                     : 0.25166808
3 day(s) MAE                       : 5.46156698
3 day(s) RMSE                      : 10.29001352
3 day(s) R2                        : -68.27097533
3 day(s) Pearson r                 : -0.22603855
3 day(s) QLIKE                     : 7.29050890
5 day(s) MAE                       : 5.45213765
5 day(s) RMSE                      : 10.24699458
5 day(s) R2                        : -67.65661087
5 day(s) Pearson r                 : -0.22498053
5 day(s) QLIKE                     : 7.97528012
10 day(s) MAE                      : 5.34443041
10 day(s) RMSE                     : 9.89196082
10 day(s) R2                       : -62.84563462
10 day(s) Pearson r                : -0.20824670
10 day(s) QLIKE                    : 8.09185362
20 day(s) MAE                      : 5.12326207
20 day(s) RMSE                     : 9.24562318
20 day(s) R2                       : -54.76894332
20 day(s) Pearson r                : -0.14075054
20 day(s) QLIKE                    : 8.01082255
full horizon MAE                   : 5.12326207
full horizon RMSE                  : 9.24562318
full horizon R2                    : -54.76894332
full horizon Pearson r             : -0.14075054
full horizon QLIKE                 : 8.01082255

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_SKAN_model_all_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=-0.00694153, max=0.977942

OHO with parameter search - Vanilla KAN - 1 task - data 3 - horizon 1¶

In [ ]:
import os

# Destination for the best model found by the single-holdout (OHO) grid search.
save_test_OHO_SKAN_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_SKAN_model.pkl"
)

# Hyper-parameter search space; commented entries were explored previously
# and are kept for reference.
param_grid = {
    "lr": [5e-5, 1e-3],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    "batch_size": [256, 512],
    "hidden_layers": [1, 2],
    "hidden_dim": [16, 32],
    "knots": [5, 8],
    "spline_power": [2, 3]
}

t1d3_OHO_SKAN_results, t1d3_OHO_SKAN_nested_results, t1d3_OHO_SKAN_best_model, t1d3_OHO_SKAN_best_params, t1d3_OHO_SKAN_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_KAN",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,

    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=1,

    # these have to be adjusted for some models
    patience=10,
    epochs=100,
    min_epochs=50,

    # saving model
    # BUG FIX: this previously passed save_test_OHO_SMLP_model_file_path
    # (the MLP cell's variable), which either raises a NameError on a fresh
    # kernel or silently saves the KAN model to the MLP path via leaked
    # kernel state. Use the SKAN path defined at the top of this cell.
    save_model_path=save_test_OHO_SKAN_model_file_path,

    # these go into the parameter grid (values below are fallbacks used
    # when a key is absent from param_grid)
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,

    hidden_layers=3,
    hidden_dim=256,

    # specific to KAN:
    knots=12,
    spline_power=3
)

Custom KAN¶

Custom KAN Architecture¶

In [832]:
import torch
import torch.nn as nn
import efficient_kan as kan


class HierarchicalMultiTaskKAN(nn.Module):
    """Hierarchical multi-task KAN with three separate trunks.

    Wiring (established in ``forward``):
      * ``trunk_aux``   feeds the heads of tasks 2..no_tasks-1 (auxiliary tasks);
      * ``trunk_task1`` feeds the task-1 head, which additionally consumes the
        concatenated hidden embeddings of all auxiliary heads;
      * ``trunk_main``  feeds the task-0 head, which additionally consumes the
        task-1 head's hidden embedding.

    The output is the concatenation [task_0, task_1, task_2, ...] along the
    last dimension, one slice of width ``output_dim // no_tasks`` per task.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        no_tasks: int,
        knots: int = 8,
        spline_power: int = 4,
        dropout: float = 0.0,
        hidden_layers: int = 2,
        hidden_dim: int = 64,
        head_hidden_dim: int = 64,
        detach_task2plus: bool = False,
        detach_task1: bool = False,
        detach_aux: bool | None = None,
    ):
        """Build the three trunks and one head per task.

        NOTE(review): ``detach_task2plus``/``detach_task1``/``detach_aux`` are
        accepted here but never stored on the instance — detaching is controlled
        per-call through the ``forward`` keyword arguments instead.
        ``detach_aux`` only overrides the local ``detach_task1`` variable, which
        is then unused; confirm whether these constructor flags were meant to
        set defaults for ``forward``.
        """
        super().__init__()
        if detach_aux is not None:
            detach_task1 = bool(detach_aux)
        assert output_dim % no_tasks == 0, "output_dim must be divisible by no_tasks"
        assert no_tasks > 1, "no_tasks must be > 1"

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.no_tasks = no_tasks
        self.knots = knots
        self.spline_power = spline_power
        self.dropout = dropout
        self.hidden_layers = hidden_layers
        self.hidden_dim = hidden_dim
        self.head_hidden_dim = head_hidden_dim

        # Width of each task's output slice in the concatenated output.
        self.task_output_dim = self.output_dim // self.no_tasks

        def build_trunk():
            # Stack of KANLinear -> Dropout pairs mapping
            # input_dim -> hidden_dim (repeated hidden_layers times).
            layers = []
            in_f = self.input_dim
            for _ in range(self.hidden_layers):
                layers.append(
                    kan.KANLinear(
                        in_features=in_f,
                        out_features=self.hidden_dim,
                        grid_size=self.knots,
                        spline_order=self.spline_power,
                        scale_noise=0.1,
                        scale_base=1.0,
                        scale_spline=1.0,
                        base_activation=torch.nn.SiLU,
                        grid_eps=0.02,
                        grid_range=[-1, 1],
                    )
                )
                layers.append(nn.Dropout(self.dropout))
                in_f = self.hidden_dim
            return nn.Sequential(*layers)

        # One independent trunk per task group (no weight sharing between them).
        self.trunk_aux   = build_trunk()
        self.trunk_task1 = build_trunk()
        self.trunk_main  = build_trunk()

        # ---- head factory: fc1 -> drop1 -> fc2 -> drop2 -> out, all KANLinear.
        def make_head(in_dim: int):
            return nn.ModuleDict({
                "fc1": kan.KANLinear(
                    in_features=in_dim,
                    out_features=self.head_hidden_dim,
                    grid_size=self.knots,
                    spline_order=self.spline_power,
                    scale_noise=0.1,
                    scale_base=1.0,
                    scale_spline=1.0,
                    base_activation=torch.nn.SiLU,
                    grid_eps=0.02,
                    grid_range=[-1, 1],
                ),
                "drop1": nn.Dropout(self.dropout),
                "fc2": kan.KANLinear(
                    in_features=self.head_hidden_dim,
                    out_features=self.head_hidden_dim,
                    grid_size=self.knots,
                    spline_order=self.spline_power,
                    scale_noise=0.1,
                    scale_base=1.0,
                    scale_spline=1.0,
                    base_activation=torch.nn.SiLU,
                    grid_eps=0.02,
                    grid_range=[-1, 1],
                ),
                "drop2": nn.Dropout(self.dropout),
                "out": kan.KANLinear(
                    in_features=self.head_hidden_dim,
                    out_features=self.task_output_dim,
                    grid_size=self.knots,
                    spline_order=self.spline_power,
                    scale_noise=0.1,
                    scale_base=1.0,
                    scale_spline=1.0,
                    base_activation=torch.nn.SiLU,
                    grid_eps=0.02,
                    grid_range=[-1, 1],
                ),
            })

        self.task_layers = nn.ModuleDict()

        # Auxiliary heads (tasks 2..no_tasks-1) read the aux trunk directly.
        for task_id in range(2, self.no_tasks):
            self.task_layers[f"task_{task_id}"] = make_head(self.hidden_dim)

        # Task-1 head input: its own trunk plus every auxiliary head embedding
        # (max(0, ...) covers no_tasks == 2, where there are no aux heads).
        in_task1 = self.hidden_dim + max(0, (self.no_tasks - 2) * self.head_hidden_dim)
        self.task_layers["task_1"] = make_head(in_task1)

        # Task-0 head input: the main trunk plus the task-1 head embedding.
        in_task0 = self.hidden_dim + self.head_hidden_dim
        self.task_layers["task_0"] = make_head(in_task0)

    def forward(
        self,
        x: torch.Tensor,
        *,
        detach_task2plus: bool = False,
        detach_task1: bool = False,
        detach_aux: bool | None = None,
    ) -> torch.Tensor:
        """Run all tasks and return their concatenated predictions.

        detach_task2plus: stop gradients from task 1 flowing back into the
            auxiliary head embeddings.
        detach_task1: stop gradients from task 0 flowing back into the
            task-1 head embedding.
        detach_aux: accepted but unused here — TODO confirm whether it was
            meant to alias one of the other flags (as in ``__init__``).
        Returns a tensor whose last dimension is ``output_dim``, laid out as
        [task_0 | task_1 | task_2 | ...].
        """

        # Independent trunk pass per task group.
        xa = self.trunk_aux(x).contiguous()
        x1 = self.trunk_task1(x).contiguous()
        xm = self.trunk_main(x).contiguous()

        outputs = []

        # Auxiliary tasks: collect both their hidden embeddings (for task 1)
        # and their predictions, tagged with task_id for stable ordering later.
        embeds_2plus = []
        for task_id in range(2, self.no_tasks):
            head = self.task_layers[f"task_{task_id}"]
            h = head["drop1"](head["fc1"](xa))
            h = head["drop2"](head["fc2"](h))
            embeds_2plus.append(h)
            y_i = head["out"](h)
            outputs.append((task_id, y_i))

        # Task 1 consumes its trunk output plus the (optionally detached)
        # auxiliary embeddings; with no aux tasks it uses the trunk alone.
        if embeds_2plus:
            H = torch.cat(embeds_2plus, dim=-1).contiguous()
            if detach_task2plus:
                H = H.detach()
            t1_in = torch.cat([x1, H], dim=-1).contiguous()
        else:
            t1_in = x1

        head1 = self.task_layers["task_1"]
        h1 = head1["drop1"](head1["fc1"](t1_in))
        h1 = head1["drop2"](head1["fc2"](h1))
        y1 = head1["out"](h1)

        # Task 0 consumes the main trunk plus the (optionally detached)
        # task-1 embedding.
        t0_in = torch.cat([xm, (h1.detach() if detach_task1 else h1)], dim=-1).contiguous()
        head0 = self.task_layers["task_0"]
        h0 = head0["drop1"](head0["fc1"](t0_in))
        h0 = head0["drop2"](head0["fc2"](h0))
        y0 = head0["out"](h0)

        # Reassemble in task order: 0, 1, then 2.. sorted by task_id.
        outputs = [y0, y1] + [y for (_tid, y) in sorted(outputs, key=lambda t: t[0])]
        return torch.cat(outputs, dim=-1).contiguous()

    def set_trainable_parts_KAN(self, phase: int):
        """Freeze/unfreeze parameter groups for phased training.

        phase 0: everything trainable.
        phase 1: aux trunk + auxiliary heads (tasks 2+) + task-1 head.
        phase 2: main trunk + task-0 head.
        phase 3: task-1 trunk + task-1 head.
        Raises ValueError for any other phase.
        """
        # Start from a fully frozen model, then re-enable the chosen parts.
        for p in self.parameters():
            p.requires_grad = False

        if phase == 0:
            for p in self.parameters():
                p.requires_grad = True
            return

        if phase == 1:
            for p in self.trunk_aux.parameters():
                p.requires_grad = True

            for tid in range(2, self.no_tasks):
                for layer in self.task_layers[f"task_{tid}"].values():
                    if hasattr(layer, "parameters"):
                        for p in layer.parameters():
                            p.requires_grad = True

            for layer in self.task_layers["task_1"].values():
                if hasattr(layer, "parameters"):
                    for p in layer.parameters():
                        p.requires_grad = True
            return

        if phase == 2:
            for p in self.trunk_main.parameters():
                p.requires_grad = True
            for layer in self.task_layers["task_0"].values():
                if hasattr(layer, "parameters"):
                    for p in layer.parameters():
                        p.requires_grad = True
            return

        if phase == 3:
            for p in self.trunk_task1.parameters():
                p.requires_grad = True
            for layer in self.task_layers["task_1"].values():
                if hasattr(layer, "parameters"):
                    for p in layer.parameters():
                        p.requires_grad = True
            return

        raise ValueError("phase must be one of {0,1,2,3}")

Custom KAN Wrapper¶

In [833]:
from sklearn.base import BaseEstimator, RegressorMixin
import torch
import torch.nn as nn

class TorchHierarchicalKANWrapper(BaseEstimator, RegressorMixin):
    """sklearn-style wrapper around HierarchicalMultiTaskKAN.

    Handles device placement (MPS > CUDA > CPU), a three-phase training
    schedule (auxiliary warmup -> joint -> main-task fine-tuning), early
    stopping on the main-task validation loss, and sklearn-compatible
    get_params / set_params plumbing (set_params rebuilds the network when
    architecture-affecting keys change).
    """

    def __init__(
        self,
        input_dim=10,
        output_dim=2,
        no_tasks=2,
        knots=8,
        spline_power=4,
        hidden_dim=256,
        hidden_layers=2,
        dropout=0.0,

        # Optimization hyper-parameters.
        lr=1e-3,
        l2_weight=1e-4,
        epochs=50,
        min_epochs=50,
        batch_size=512,

        # NOTE(review): the default "gmse" is not handled by _compute_loss
        # (which only accepts "mse", "gauss_nll_var", "student_t_nll_var"),
        # so fitting with the default loss_type raises ValueError — confirm
        # that callers always override it.
        loss_type="gmse", 
        target_is_logvar=True,      
        nll_eps=1e-12,
        clamp_logvar_min=-20.0,
        clamp_logvar_max=20.0,
        student_df=5.0,
        
        # `loss`, when not None, overrides loss_type (legacy alias).
        loss=None,

        # Early-stopping controls.
        patience=10,
        min_delta=1e-4,

        # Phase schedule lengths (in epochs); see fit().
        warmup_aux_epochs=15,
        joint_epochs=15,

        device=None,
        verbose=True,
        checkpoint_path=None,
    ):
        assert no_tasks > 1, "Number of tasks must be > 1 for hierarchical KAN."

        # Architecture hyper-parameters (changing these requires rebuilding
        # the underlying model — see set_params).
        self.input_dim = int(input_dim)
        self.output_dim = int(output_dim)
        self.no_tasks = int(no_tasks)
        self.knots = int(knots)
        self.spline_power = int(spline_power)
        self.hidden_dim = int(hidden_dim)
        self.hidden_layers = int(hidden_layers)
        self.dropout = float(dropout)

        self.lr = float(lr)
        self.l2_weight = float(l2_weight)
        self.epochs = int(epochs)
        self.min_epochs = int(min_epochs)
        self.batch_size = int(batch_size)

        # The `loss` alias, when given, wins over `loss_type`.
        self.loss_type = str(loss_type if loss is None else loss) 
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)

        self.patience = int(patience)
        self.min_delta = float(min_delta)

        self.warmup_aux_epochs = int(warmup_aux_epochs)
        self.joint_epochs = int(joint_epochs)

        # Device preference when none given: Apple MPS, then CUDA, then CPU.
        self.device = device or ("mps" if torch.backends.mps.is_available()
                                 else ("cuda" if torch.cuda.is_available() else "cpu"))
        self.verbose = bool(verbose)
        # NOTE(review): checkpoint_path is stored but never used anywhere in
        # this class (fit keeps the best state in memory only) — confirm
        # whether on-disk checkpointing was intended.
        self.checkpoint_path = checkpoint_path

        # The network is built eagerly so fit()/predict() work immediately.
        self.model = HierarchicalMultiTaskKAN(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            no_tasks=self.no_tasks,
            knots=self.knots,
            spline_power=self.spline_power,
            dropout=self.dropout,
            hidden_layers=self.hidden_layers,
            hidden_dim=self.hidden_dim,
        ).to(self.device)

        if self.verbose:
            print(f"Using device: {self.device}")

    def _adamw_param_groups(self):
        """Split trainable parameters into decay / no-decay groups for AdamW.

        Biases and parameters whose name contains "norm", "grid", "knot" or
        "spline" are excluded from weight decay (substring match on the
        lower-cased parameter name); everything else gets l2_weight.
        """
        decay, no_decay = [], []
        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                continue
            n = name.lower()
            if n.endswith("bias") or "norm" in n or "grid" in n or "knot" in n or "spline" in n:
                no_decay.append(p)
            else:
                decay.append(p)
        return [
            {"params": decay, "weight_decay": self.l2_weight},
            {"params": no_decay, "weight_decay": 0.0},
        ]

    def _compute_loss(self, y_hat, y_true):
        """Compute the configured loss between predictions and targets.

        "mse": plain mean-squared error on the raw tensors.
        "gauss_nll_var": Gaussian NLL with y_hat as predicted log-variance z
            (v * exp(-z) + z, additive constants dropped).
        "student_t_nll_var": Student-t NLL with `student_df` degrees of
            freedom under the same log-variance parametrization.

        For the NLL losses, targets are exponentiated when target_is_logvar
        and floored at nll_eps; predictions are clamped to
        [clamp_logvar_min, clamp_logvar_max] for numerical stability.
        Raises ValueError for any other loss_type.
        """
        if self.loss_type == "mse":
            return nn.functional.mse_loss(y_hat, y_true, reduction="mean")

        # Predicted log-variance, clamped to avoid exp overflow/underflow.
        z = torch.clamp(y_hat, self.clamp_logvar_min, self.clamp_logvar_max)

        # Target variance (convert from log-variance if needed), floored.
        v = torch.exp(y_true) if self.target_is_logvar else y_true
        v = torch.clamp(v, min=self.nll_eps)

        if self.loss_type == "gauss_nll_var":
            loss = v * torch.exp(-z) + z
            return loss.mean()

        if self.loss_type == "student_t_nll_var":
            nu = torch.tensor(self.student_df, device=z.device, dtype=z.dtype)
            loss = 0.5 * (nu + 1.0) * torch.log1p(v / (nu * torch.exp(z))) + 0.5 * z
            return loss.mean()

        raise ValueError(f"Unknown loss_type: {self.loss_type}")

    def fit(self, X, y, X_val, y_val):
        """Train with a three-phase schedule and early stopping.

        Phase schedule by epoch index:
          [0, warmup_aux_epochs): phase 1 — auxiliary task losses only;
          next joint_epochs epochs: phase 0 — sum of all task losses;
          remainder: phase 2 — main-task loss only, auxiliary path detached.

        Early stopping watches the main-task validation loss and can only
        trigger once (epoch + 1) >= min_epochs; an improvement must exceed
        min_delta to reset patience. The best state_dict (kept on CPU) is
        restored before returning.

        Note the non-standard signature: validation data is required,
        unlike sklearn's usual fit(X, y). Returns self.
        """
        # Validate target shapes before moving anything to the device.
        if y.ndim != 2 or y_val.ndim != 2:
            raise ValueError(f"y and y_val must be 2D (N, output_dim); got y={y.shape}, y_val={y_val.shape}")
        if y.shape[1] != self.output_dim or y_val.shape[1] != self.output_dim:
            raise ValueError(f"Last dim of y/y_val must equal output_dim={self.output_dim} "
                             f"(got {y.shape[1]} and {y_val.shape[1]})")
        if self.output_dim % self.no_tasks != 0:
            raise ValueError(f"output_dim ({self.output_dim}) must be divisible by no_tasks ({self.no_tasks})")
        # Width of each task's slice in the flat output vector.
        task_out = self.output_dim // self.no_tasks  

        # Move all data to the training device once, up front.
        X = torch.as_tensor(X, dtype=torch.float32, device=self.device).contiguous()
        y = torch.as_tensor(y, dtype=torch.float32, device=self.device).contiguous()
        X_val = torch.as_tensor(X_val, dtype=torch.float32, device=self.device).contiguous()
        y_val = torch.as_tensor(y_val, dtype=torch.float32, device=self.device).contiguous()

        # NOTE: param groups are built once here; all parameters still
        # require grad at this point, so every parameter is registered.
        optimizer = torch.optim.AdamW(self._adamw_param_groups(), lr=self.lr)

        dataset = torch.utils.data.TensorDataset(X, y)
        loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, shuffle=True)

        # Phase boundaries, clipped to the total epoch budget.
        aux_end = min(self.warmup_aux_epochs, self.epochs)
        joint_end = min(aux_end + self.joint_epochs, self.epochs)

        best_val = float("inf")
        best_state = None
        epochs_no_improve = 0

        for epoch in range(self.epochs):

            # Select the phase for this epoch (see docstring).
            if epoch < aux_end:
                phase = 1
            elif epoch < joint_end:
                phase = 0
            else:
                phase = 2

            self.model.set_trainable_parts_KAN(phase)
            self.model.train()

            running = 0.0
            n_batches = 0
            for xb, yb in loader:
                optimizer.zero_grad(set_to_none=True)
                # Auxiliary path is detached only in the final phase.
                preds = self.model(xb, detach_aux=(phase == 2)).contiguous()

                total = 0.0
                if phase == 1:
                    # Auxiliary tasks only (tasks 1..no_tasks-1).
                    for i in range(1, self.no_tasks):
                        s = i * task_out; e = s + task_out
                        total = total + self._compute_loss(preds[:, s:e], yb[:, s:e])
                elif phase == 0:
                    # Joint: sum losses across all tasks.
                    for i in range(self.no_tasks):
                        s = i * task_out; e = s + task_out
                        total = total + self._compute_loss(preds[:, s:e], yb[:, s:e])
                else:
                    # Main task (task 0) only.
                    total = total + self._compute_loss(preds[:, 0:task_out], yb[:, 0:task_out])

                total.backward()
                optimizer.step()
                running += float(total.detach().cpu())
                n_batches += 1

            train_loss = running / max(1, n_batches)

            # Validation is always scored on the main task only.
            self.model.eval()
            with torch.no_grad():
                val_preds = self.model(X_val, detach_aux=True).contiguous()
                val_loss = float(self._compute_loss(val_preds[:, 0:task_out], y_val[:, 0:task_out]).detach().cpu())

            if self.verbose:
                print(f"Epoch {epoch+1:03d} | phase={phase} | train_loss={train_loss:.4f} | val_main={val_loss:.6f}")

            # Improvement must exceed min_delta to count; best weights are
            # snapshotted to CPU so device memory is not duplicated.
            if (best_val - val_loss) > self.min_delta:
                best_val = val_loss
                epochs_no_improve = 0
                best_state = {k: v.detach().cpu() for k, v in self.model.state_dict().items()}
            else:
                epochs_no_improve += 1
                if epochs_no_improve >= self.patience and (epoch + 1) >= self.min_epochs:
                    if self.verbose:
                        print("Early stopping triggered.")
                    break

        # Restore the best-performing weights, if any epoch improved.
        if best_state is not None:
            self.model.load_state_dict(best_state)

        return self

    def predict(self, X):
        """Run inference and return predictions as a NumPy array.

        Input is moved to the model's device; the auxiliary path is
        detached (detach_aux=True), matching the validation-time behavior
        in fit().
        """
        self.model.eval()
        X = torch.as_tensor(X, dtype=torch.float32, device=self.device).contiguous()
        with torch.no_grad():
            preds = self.model(X, detach_aux=True).contiguous()
        return preds.detach().cpu().numpy()

    def get_params(self, deep=True):
        """Return all hyper-parameters as a dict (sklearn API).

        The `loss` constructor alias is intentionally absent; its resolved
        value is carried by `loss_type`. Note that `device` reports the
        resolved device string, not the original constructor argument.
        """
        return {
            "input_dim": self.input_dim,
            "output_dim": self.output_dim,
            "no_tasks": self.no_tasks,
            "knots": self.knots,
            "spline_power": self.spline_power,
            "hidden_dim": self.hidden_dim,
            "hidden_layers": self.hidden_layers,
            "dropout": self.dropout,
            "lr": self.lr,
            "l2_weight": self.l2_weight,
            "epochs": self.epochs,
            "min_epochs": self.min_epochs,
            "batch_size": self.batch_size,
            "loss_type": self.loss_type,
            "target_is_logvar": self.target_is_logvar,
            "nll_eps": self.nll_eps,
            "clamp_logvar_min": self.clamp_logvar_min,
            "clamp_logvar_max": self.clamp_logvar_max,
            "student_df": self.student_df,
            "patience": self.patience,
            "min_delta": self.min_delta,
            "warmup_aux_epochs": self.warmup_aux_epochs,
            "joint_epochs": self.joint_epochs,
            "device": self.device,
            "verbose": self.verbose,
            "checkpoint_path": self.checkpoint_path,
        }

    def set_params(self, **params):
        """Update hyper-parameters; rebuild the network if architecture changed.

        Keys in `arch_keys` force a fresh HierarchicalMultiTaskKAN (trained
        weights are discarded). NOTE(review): unknown keys are silently
        ignored by the hasattr guard, unlike sklearn's default set_params,
        which raises on invalid parameters.
        """
        arch_keys = {
            "input_dim", "output_dim", "no_tasks", "knots", "spline_power",
            "hidden_dim", "hidden_layers", "dropout", "device"
        }
        need_rebuild = any(k in arch_keys for k in params)

        for k, v in params.items():
            if hasattr(self, k):
                setattr(self, k, v)

        if need_rebuild:
            self.model = HierarchicalMultiTaskKAN(
                input_dim=self.input_dim,
                output_dim=self.output_dim,
                no_tasks=self.no_tasks,
                knots=self.knots,
                spline_power=self.spline_power,
                dropout=self.dropout,
                hidden_layers=self.hidden_layers,
                hidden_dim=self.hidden_dim,
            ).to(self.device)
        else:
            # No architecture change: scalar hyper-parameters were updated
            # in place and the existing model (with its weights) is kept.
            pass

        return self

All tasks EURUSD with Custom KAN without CV¶

In [831]:
import os
# Destination for pickling the single-fit (no cross-validation) model.
save_test_EURUSD_CKAN_model_all_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_CKAN_model_all_task.pkl"
)

# Smoke-run (epochs=2) of the custom hierarchical KAN on all EURUSD tasks
# with a 60-step horizon; nested CV is disabled, so a single train/val/test
# split is used.
# NOTE(review): knots=10 and spline_power=5 are passed here, but the
# parameter dump printed below reports knots: 8 and spline_power: 3 —
# verify that train_and_evaluate_model actually forwards these arguments
# to the model constructor.
tall_EURUSD_CKAN_results, tall_EURUSD_CKAN_nested_results, tall_EURUSD_CKAN_best_model, tall_EURUSD_CKAN_best_params, _ = train_and_evaluate_model(
    model_type="Custom_KAN",
    X_price=eur8_X_price,
    X_time=eur8_X_time,
    y=eur8_y,
    no_tasks=eur8_y.shape[2],
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test_EURUSD_CKAN_model_all_file_path,
    lr=5e-4,
    epochs=2,
    batch_size=64,
    verbose=True,
    time_horizon=60,
    hidden_layers=3,
    dropout=0.01,
    l2_weight=1e-5,
    hidden_dim=128,
    knots=10,
    spline_power=5
)
[mode=log_var_ratio] loss_type=mse, target_is_logvar=False, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 60
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_var_ratio scaled):
Shape: (2723, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.682091899029062
  Min value:  -5.349321362921576
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_var_ratio scaled):
Shape: (302, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.485281447677025
  Min value:  -4.167019125166536
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_var_ratio scaled):
Shape: (757, 60, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6809168227569553
  Min value:  -4.594266491108728
Epoch 001 | phase=1 | train_loss=4.7375 | val_main=1.047299
Epoch 002 | phase=1 | train_loss=4.0372 | val_main=1.047360

Parameters used in the single-fit model:
input_dim: 60
output_dim: 360
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.01000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 2
min_epochs: 50
batch_size: 64
loss_type: mse
target_is_logvar: False
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.18542461
1 day(s) RMSE                      : 0.45613475
1 day(s) R2                        : -3.55836048
1 day(s) Pearson r                 : 0.36894578
1 day(s) QLIKE                     : 0.73267439
3 day(s) MAE                       : 0.18796719
3 day(s) RMSE                      : 0.46124314
3 day(s) R2                        : -3.69699182
3 day(s) Pearson r                 : 0.33581505
3 day(s) QLIKE                     : 0.74091243
5 day(s) MAE                       : 0.19054060
5 day(s) RMSE                      : 0.46702085
5 day(s) R2                        : -3.87012169
5 day(s) Pearson r                 : 0.30400454
5 day(s) QLIKE                     : 0.75254326
10 day(s) MAE                      : 0.19742627
10 day(s) RMSE                     : 0.47963427
10 day(s) R2                       : -4.18577415
10 day(s) Pearson r                : 0.24793903
10 day(s) QLIKE                    : 0.80287870
20 day(s) MAE                      : 0.20246563
20 day(s) RMSE                     : 0.48874286
20 day(s) R2                       : -4.44811411
20 day(s) Pearson r                : 0.19435343
20 day(s) QLIKE                    : 0.82371550
full horizon MAE                   : 0.20479702
full horizon RMSE                  : 0.49740216
full horizon R2                    : -4.75990181
full horizon Pearson r             : 0.15550393
full horizon QLIKE                 : 0.83855376

--- Task 2 ---
1 day(s) MAE                       : 0.03456308
1 day(s) RMSE                      : 0.05622747
1 day(s) R2                        : -0.60492414
1 day(s) Pearson r                 : -0.12338524
1 day(s) QLIKE                     : 12.64939139
3 day(s) MAE                       : 0.03457554
3 day(s) RMSE                      : 0.05622132
3 day(s) R2                        : -0.60416785
3 day(s) Pearson r                 : -0.09645022
3 day(s) QLIKE                     : 12.65268035
5 day(s) MAE                       : 0.03453973
5 day(s) RMSE                      : 0.05621465
5 day(s) R2                        : -0.60270920
5 day(s) Pearson r                 : -0.10187965
5 day(s) QLIKE                     : 12.64823067
10 day(s) MAE                      : 0.03444604
10 day(s) RMSE                     : 0.05619665
10 day(s) R2                       : -0.59950522
10 day(s) Pearson r                : -0.08481286
10 day(s) QLIKE                    : 12.52097634
20 day(s) MAE                      : 0.03439762
20 day(s) RMSE                     : 0.05618340
20 day(s) R2                       : -0.59766259
20 day(s) Pearson r                : -0.08306283
20 day(s) QLIKE                    : 12.40859400
full horizon MAE                   : 0.03366243
full horizon RMSE                  : 0.05590914
full horizon R2                    : -0.56735527
full horizon Pearson r             : -0.06119730
full horizon QLIKE                 : 12.46081650

--- Task 3 ---
1 day(s) MAE                       : 0.69104914
1 day(s) RMSE                      : 0.80205301
1 day(s) R2                        : -3.76251876
1 day(s) Pearson r                 : 0.13754765
1 day(s) QLIKE                     : 3.88281602
3 day(s) MAE                       : 0.69024720
3 day(s) RMSE                      : 0.80033865
3 day(s) R2                        : -3.74168401
3 day(s) Pearson r                 : 0.13668476
3 day(s) QLIKE                     : 3.88906552
5 day(s) MAE                       : 0.69191772
5 day(s) RMSE                      : 0.80499197
5 day(s) R2                        : -3.79588564
5 day(s) Pearson r                 : 0.13595274
5 day(s) QLIKE                     : 3.86761763
10 day(s) MAE                      : 0.69045463
10 day(s) RMSE                     : 0.80432006
10 day(s) R2                       : -3.78541717
10 day(s) Pearson r                : 0.14297675
10 day(s) QLIKE                    : 3.86424572
20 day(s) MAE                      : 0.69010078
20 day(s) RMSE                     : 0.80128957
20 day(s) R2                       : -3.74751752
20 day(s) Pearson r                : 0.14466326
20 day(s) QLIKE                    : 3.86334197
full horizon MAE                   : 0.69325460
full horizon RMSE                  : 0.80403789
full horizon R2                    : -3.75552235
full horizon Pearson r             : 0.14150736
full horizon QLIKE                 : 3.91305959

--- Task 4 ---
1 day(s) MAE                       : 0.03319704
1 day(s) RMSE                      : 0.06219768
1 day(s) R2                        : -0.38468825
1 day(s) Pearson r                 : -0.13732561
1 day(s) QLIKE                     : 3.86984611
3 day(s) MAE                       : 0.03317330
3 day(s) RMSE                      : 0.06219413
3 day(s) R2                        : -0.38432177
3 day(s) Pearson r                 : -0.13536904
3 day(s) QLIKE                     : 3.83069314
5 day(s) MAE                       : 0.03316404
5 day(s) RMSE                      : 0.06219851
5 day(s) R2                        : -0.38414101
5 day(s) Pearson r                 : -0.13875023
5 day(s) QLIKE                     : 3.82974574
10 day(s) MAE                      : 0.03316043
10 day(s) RMSE                     : 0.06221985
10 day(s) R2                       : -0.38422222
10 day(s) Pearson r                : -0.14857166
10 day(s) QLIKE                    : 3.85084722
20 day(s) MAE                      : 0.03314247
20 day(s) RMSE                     : 0.06222549
20 day(s) R2                       : -0.38361853
20 day(s) Pearson r                : -0.15406077
20 day(s) QLIKE                    : 3.83816777
full horizon MAE                   : 0.03270034
full horizon RMSE                  : 0.06214025
full horizon R2                    : -0.37117286
full horizon Pearson r             : -0.14979936
full horizon QLIKE                 : 3.77332190

--- Task 5 ---
1 day(s) MAE                       : 0.00512759
1 day(s) RMSE                      : 0.00780967
1 day(s) R2                        : -0.75768034
1 day(s) Pearson r                 : 0.16655465
1 day(s) QLIKE                     : 9.36727664
3 day(s) MAE                       : 0.00512759
3 day(s) RMSE                      : 0.00780967
3 day(s) R2                        : -0.75768173
3 day(s) Pearson r                 : 0.14878561
3 day(s) QLIKE                     : 9.28601575
5 day(s) MAE                       : 0.00512760
5 day(s) RMSE                      : 0.00780968
5 day(s) R2                        : -0.75768539
5 day(s) Pearson r                 : 0.14664661
5 day(s) QLIKE                     : 9.15466465
10 day(s) MAE                      : 0.00512759
10 day(s) RMSE                     : 0.00780967
10 day(s) R2                       : -0.75768096
10 day(s) Pearson r                : 0.12695445
10 day(s) QLIKE                    : 9.28828002
20 day(s) MAE                      : 0.00513559
20 day(s) RMSE                     : 0.00781229
20 day(s) R2                       : -0.76094048
20 day(s) Pearson r                : 0.11247400
20 day(s) QLIKE                    : 10.14935889
full horizon MAE                   : 0.00524527
full horizon RMSE                  : 0.00787223
full horizon R2                    : -0.79837706
full horizon Pearson r             : 0.03871303
full horizon QLIKE                 : 10.36337195

--- Task 6 ---
1 day(s) MAE                       : 7.46225560
1 day(s) RMSE                      : 11.25221273
1 day(s) R2                        : -81.86068673
1 day(s) Pearson r                 : 0.11240491
1 day(s) QLIKE                     : 0.25865857
3 day(s) MAE                       : 5.99968232
3 day(s) RMSE                      : 9.42503100
3 day(s) R2                        : -57.11456436
3 day(s) Pearson r                 : 0.10444405
3 day(s) QLIKE                     : 0.26802778
5 day(s) MAE                       : 5.67413671
5 day(s) RMSE                      : 9.46334962
5 day(s) R2                        : -57.55704044
5 day(s) Pearson r                 : 0.06587238
5 day(s) QLIKE                     : 0.25707987
10 day(s) MAE                      : 6.23848177
10 day(s) RMSE                     : 11.77317360
10 day(s) R2                       : -89.43853710
10 day(s) Pearson r                : 0.03615388
10 day(s) QLIKE                    : 0.26042923
20 day(s) MAE                      : 5.95342050
20 day(s) RMSE                     : 10.79475762
20 day(s) R2                       : -75.02314433
20 day(s) Pearson r                : 0.03310982
20 day(s) QLIKE                    : 0.26129253
full horizon MAE                   : 5.78031217
full horizon RMSE                  : 10.06567958
full horizon R2                    : -65.08543874
full horizon Pearson r             : 0.03109776
full horizon QLIKE                 : 0.27303312

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_CKAN_model_all_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00805188, max=4.70148

All tasks EURUSD 3 with Custom KAN without CV¶

In [714]:
import os
# NOTE(review): this cell reuses the exact same pickle path and result
# variable names as the earlier "All tasks EURUSD" run, so executing it
# overwrites both the saved model file and the in-memory results.
save_test_EURUSD_CKAN_model_all_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_CKAN_model_all_task.pkl"
)

# Full run (epochs=100, early stopping after min_epochs=50) with a 20-step
# horizon and target_mode="log_mse"; the last return value (test targets /
# predictions) is kept this time as test_y_data.
# NOTE(review): knots=20 and spline_power=7 are passed here, but the
# parameter dump printed below reports knots: 8 and spline_power: 3 —
# verify that train_and_evaluate_model forwards these two arguments.
tall_EURUSD_CKAN_results, tall_EURUSD_CKAN_nested_results, tall_EURUSD_CKAN_best_model, tall_EURUSD_CKAN_best_params,test_y_data = train_and_evaluate_model(
    model_type="Custom_KAN",
    X_price=eur8_X_price,
    X_time=eur8_X_time,
    y=eur8_y,
    no_tasks=eur8_y.shape[2],
    use_nested_cv=False,
    flatten=True,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test_EURUSD_CKAN_model_all_file_path,
    lr=5e-3,
    epochs=100,
    batch_size=256,
    verbose=True,
    time_horizon=20,
    hidden_layers=4,
    dropout=0.00,
    l2_weight=1e-6,
    hidden_dim=128,
    knots=20,
    spline_power=7,
    target_mode="log_mse",
    min_epochs=50
)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.7474473273136155
  Min value:  -3.7200073694899167
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_mse scaled):
Shape: (302, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.321220211077452
  Min value:  -2.97689474283611
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_mse scaled):
Shape: (757, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737455
  Min value:  -5.35233858226595
Epoch 001 | phase=1 | train_loss=4.7726 | val_main=1.229541
Epoch 002 | phase=1 | train_loss=4.4469 | val_main=1.228539
Epoch 003 | phase=1 | train_loss=4.2562 | val_main=1.229186
Epoch 004 | phase=1 | train_loss=4.0207 | val_main=1.228739
Epoch 005 | phase=1 | train_loss=3.8035 | val_main=1.229743
Epoch 006 | phase=1 | train_loss=3.5642 | val_main=1.229248
Epoch 007 | phase=1 | train_loss=3.5649 | val_main=1.229328
Epoch 008 | phase=1 | train_loss=3.3068 | val_main=1.229107
Epoch 009 | phase=1 | train_loss=3.0165 | val_main=1.228857
Epoch 010 | phase=1 | train_loss=2.9009 | val_main=1.228606
Epoch 011 | phase=1 | train_loss=2.9138 | val_main=1.228859
Epoch 012 | phase=1 | train_loss=2.7339 | val_main=1.229179
Epoch 013 | phase=1 | train_loss=2.5972 | val_main=1.228245
Epoch 014 | phase=1 | train_loss=2.4470 | val_main=1.228035
Epoch 015 | phase=1 | train_loss=2.2927 | val_main=1.227029
Epoch 016 | phase=0 | train_loss=3.2557 | val_main=0.986940
Epoch 017 | phase=0 | train_loss=2.9604 | val_main=0.822020
Epoch 018 | phase=0 | train_loss=2.6815 | val_main=0.749213
Epoch 019 | phase=0 | train_loss=2.6514 | val_main=0.675409
Epoch 020 | phase=0 | train_loss=2.4726 | val_main=0.543417
Epoch 021 | phase=0 | train_loss=2.3284 | val_main=0.410265
Epoch 022 | phase=0 | train_loss=2.1391 | val_main=0.383468
Epoch 023 | phase=0 | train_loss=1.9481 | val_main=0.397956
Epoch 024 | phase=0 | train_loss=1.8922 | val_main=0.336679
Epoch 025 | phase=0 | train_loss=1.7886 | val_main=0.350582
Epoch 026 | phase=0 | train_loss=1.7588 | val_main=0.344210
Epoch 027 | phase=0 | train_loss=1.6948 | val_main=0.355806
Epoch 028 | phase=0 | train_loss=1.5929 | val_main=0.341606
Epoch 029 | phase=0 | train_loss=1.6388 | val_main=0.342273
Epoch 030 | phase=0 | train_loss=1.6189 | val_main=0.345956
Epoch 031 | phase=2 | train_loss=0.2069 | val_main=0.354302
Epoch 032 | phase=2 | train_loss=0.1975 | val_main=0.355053
Epoch 033 | phase=2 | train_loss=0.1908 | val_main=0.351979
Epoch 034 | phase=2 | train_loss=0.1856 | val_main=0.359418
Epoch 035 | phase=2 | train_loss=0.1786 | val_main=0.360988
Epoch 036 | phase=2 | train_loss=0.1739 | val_main=0.364167
Epoch 037 | phase=2 | train_loss=0.1693 | val_main=0.358324
Epoch 038 | phase=2 | train_loss=0.1629 | val_main=0.361569
Epoch 039 | phase=2 | train_loss=0.1574 | val_main=0.377995
Epoch 040 | phase=2 | train_loss=0.1527 | val_main=0.370100
Epoch 041 | phase=2 | train_loss=0.1480 | val_main=0.375942
Epoch 042 | phase=2 | train_loss=0.1443 | val_main=0.383986
Epoch 043 | phase=2 | train_loss=0.1399 | val_main=0.389461
Epoch 044 | phase=2 | train_loss=0.1356 | val_main=0.377532
Epoch 045 | phase=2 | train_loss=0.1333 | val_main=0.390113
Epoch 046 | phase=2 | train_loss=0.1315 | val_main=0.390065
Epoch 047 | phase=2 | train_loss=0.1291 | val_main=0.381394
Epoch 048 | phase=2 | train_loss=0.1293 | val_main=0.404602
Epoch 049 | phase=2 | train_loss=0.1304 | val_main=0.397060
Epoch 050 | phase=2 | train_loss=0.1370 | val_main=0.413439
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 4
dropout: 0.00000000
lr: 0.00500000
l2_weight: 0.00000100
epochs: 100
min_epochs: 50
batch_size: 256
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.09136260
1 day(s) RMSE                      : 0.17393491
1 day(s) R2                        : 0.33717959
1 day(s) Pearson r                 : 0.59127648
1 day(s) QLIKE                     : 0.40121835
3 day(s) MAE                       : 0.09374071
3 day(s) RMSE                      : 0.17936543
3 day(s) R2                        : 0.28970714
3 day(s) Pearson r                 : 0.55008661
3 day(s) QLIKE                     : 0.44030811
5 day(s) MAE                       : 0.09399874
5 day(s) RMSE                      : 0.18101771
5 day(s) R2                        : 0.26833984
5 day(s) Pearson r                 : 0.52983445
5 day(s) QLIKE                     : 0.44940892
10 day(s) MAE                      : 0.09613963
10 day(s) RMSE                     : 0.18763360
10 day(s) R2                       : 0.20637668
10 day(s) Pearson r                : 0.47954500
10 day(s) QLIKE                    : 0.47086224
20 day(s) MAE                      : 0.09943945
20 day(s) RMSE                     : 0.19234848
20 day(s) R2                       : 0.15615461
20 day(s) Pearson r                : 0.43424892
20 day(s) QLIKE                    : 0.49919692
full horizon MAE                   : 0.09943945
full horizon RMSE                  : 0.19234848
full horizon R2                    : 0.15615461
full horizon Pearson r             : 0.43424892
full horizon QLIKE                 : 0.49919692

--- Task 2 ---
1 day(s) MAE                       : 0.03622647
1 day(s) RMSE                      : 0.06335180
1 day(s) R2                        : -1.03739512
1 day(s) Pearson r                 : -0.05437391
1 day(s) QLIKE                     : 18.04056766
3 day(s) MAE                       : 0.03591437
3 day(s) RMSE                      : 0.06036139
3 day(s) R2                        : -0.84912485
3 day(s) Pearson r                 : -0.06309015
3 day(s) QLIKE                     : 18.23287484
5 day(s) MAE                       : 0.03784409
5 day(s) RMSE                      : 0.09981290
5 day(s) R2                        : -4.05276416
5 day(s) Pearson r                 : -0.03935359
5 day(s) QLIKE                     : 18.30068912
10 day(s) MAE                      : 0.03767041
10 day(s) RMSE                     : 0.08765311
10 day(s) R2                       : -2.89134231
10 day(s) Pearson r                : -0.04813478
10 day(s) QLIKE                    : 18.40840440
20 day(s) MAE                      : 0.04446403
20 day(s) RMSE                     : 0.18997778
20 day(s) R2                       : -17.26731078
20 day(s) Pearson r                : -0.04160523
20 day(s) QLIKE                    : 18.67206182
full horizon MAE                   : 0.04446403
full horizon RMSE                  : 0.18997778
full horizon R2                    : -17.26731078
full horizon Pearson r             : -0.04160523
full horizon QLIKE                 : 18.67206182

--- Task 3 ---
1 day(s) MAE                       : 0.58053479
1 day(s) RMSE                      : 0.69206814
1 day(s) R2                        : -2.54591453
1 day(s) Pearson r                 : 0.06981244
1 day(s) QLIKE                     : 10.25943273
3 day(s) MAE                       : 0.59120694
3 day(s) RMSE                      : 0.70432404
3 day(s) R2                        : -2.67223147
3 day(s) Pearson r                 : 0.04661128
3 day(s) QLIKE                     : 10.32903089
5 day(s) MAE                       : 0.58823040
5 day(s) RMSE                      : 0.70549868
5 day(s) R2                        : -2.68364808
5 day(s) Pearson r                 : 0.03798989
5 day(s) QLIKE                     : 10.77718823
10 day(s) MAE                      : 0.60552771
10 day(s) RMSE                     : 0.75233869
10 day(s) R2                       : -3.18686353
10 day(s) Pearson r                : -0.00140290
10 day(s) QLIKE                    : 10.93415770
20 day(s) MAE                      : 0.60521273
20 day(s) RMSE                     : 0.74937638
20 day(s) R2                       : -3.15228925
20 day(s) Pearson r                : -0.02209214
20 day(s) QLIKE                    : 10.62201733
full horizon MAE                   : 0.60521273
full horizon RMSE                  : 0.74937638
full horizon R2                    : -3.15228925
full horizon Pearson r             : -0.02209214
full horizon QLIKE                 : 10.62201733

--- Task 4 ---
1 day(s) MAE                       : 0.03495275
1 day(s) RMSE                      : 0.05977991
1 day(s) R2                        : -0.27912815
1 day(s) Pearson r                 : -0.07340220
1 day(s) QLIKE                     : 10.83685775
3 day(s) MAE                       : 0.03472426
3 day(s) RMSE                      : 0.05960171
3 day(s) R2                        : -0.27132211
3 day(s) Pearson r                 : -0.06539381
3 day(s) QLIKE                     : 10.71148951
5 day(s) MAE                       : 0.03468325
5 day(s) RMSE                      : 0.05954163
5 day(s) R2                        : -0.26841654
5 day(s) Pearson r                 : -0.06179265
5 day(s) QLIKE                     : 10.87783253
10 day(s) MAE                      : 0.03531393
10 day(s) RMSE                     : 0.05965116
10 day(s) R2                       : -0.27228877
10 day(s) Pearson r                : -0.06275671
10 day(s) QLIKE                    : 11.36560265
20 day(s) MAE                      : 0.03715652
20 day(s) RMSE                     : 0.06013314
20 day(s) R2                       : -0.29213351
20 day(s) Pearson r                : -0.07384959
20 day(s) QLIKE                    : 11.72158916
full horizon MAE                   : 0.03715652
full horizon RMSE                  : 0.06013314
full horizon R2                    : -0.29213351
full horizon Pearson r             : -0.07384959
full horizon QLIKE                 : 11.72158916

--- Task 5 ---
1 day(s) MAE                       : 0.00568916
1 day(s) RMSE                      : 0.00814743
1 day(s) R2                        : -0.91300298
1 day(s) Pearson r                 : 0.16577720
1 day(s) QLIKE                     : 18.03186896
3 day(s) MAE                       : 0.00656486
3 day(s) RMSE                      : 0.00990157
3 day(s) R2                        : -1.82541855
3 day(s) Pearson r                 : 0.15869155
3 day(s) QLIKE                     : 18.09475390
5 day(s) MAE                       : 0.00692765
5 day(s) RMSE                      : 0.01040188
5 day(s) R2                        : -2.11815763
5 day(s) Pearson r                 : 0.16151852
5 day(s) QLIKE                     : 18.13228880
10 day(s) MAE                      : 0.00721463
10 day(s) RMSE                     : 0.01079705
10 day(s) R2                       : -2.35958052
10 day(s) Pearson r                : 0.15117809
10 day(s) QLIKE                    : 18.28974646
20 day(s) MAE                      : 0.00671517
20 day(s) RMSE                     : 0.00991624
20 day(s) R2                       : -1.83714769
20 day(s) Pearson r                : 0.11115134
20 day(s) QLIKE                    : 18.28418225
full horizon MAE                   : 0.00671517
full horizon RMSE                  : 0.00991624
full horizon R2                    : -1.83714769
full horizon Pearson r             : 0.11115134
full horizon QLIKE                 : 18.28418225

--- Task 6 ---
1 day(s) MAE                       : 2.03278998
1 day(s) RMSE                      : 6.36610094
1 day(s) R2                        : -25.52282018
1 day(s) Pearson r                 : 0.15374982
1 day(s) QLIKE                     : 0.05908256
3 day(s) MAE                       : 2.00541216
3 day(s) RMSE                      : 6.23090373
3 day(s) R2                        : -24.39928517
3 day(s) Pearson r                 : 0.15527745
3 day(s) QLIKE                     : 0.05951197
5 day(s) MAE                       : 2.01815231
5 day(s) RMSE                      : 6.30991590
5 day(s) R2                        : -25.03373559
5 day(s) Pearson r                 : 0.14721703
5 day(s) QLIKE                     : 0.06024045
10 day(s) MAE                      : 2.02928520
10 day(s) RMSE                     : 6.45504481
10 day(s) R2                       : -26.18722964
10 day(s) Pearson r                : 0.14504175
10 day(s) QLIKE                    : 0.06040400
20 day(s) MAE                      : 2.04805330
20 day(s) RMSE                     : 6.51018634
20 day(s) R2                       : -26.65074749
20 day(s) Pearson r                : 0.22048868
20 day(s) QLIKE                    : 0.05947961
full horizon MAE                   : 2.04805330
full horizon RMSE                  : 6.51018634
full horizon R2                    : -26.65074749
full horizon Pearson r             : 0.22048868
full horizon QLIKE                 : 0.05947961

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_CKAN_model_all_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00306101, max=0.921228

Test data¶

In [571]:
# Inspect the saved (truth, prediction) pairs at the first forecast step.
# Assumes test_y_data is (samples, horizon, 2) with column 0 = y_true and
# column 1 = y_pred -- TODO confirm against the saving code.
# BUG FIX: removed the bare `test_y_data` expression that preceded these
# assignments; it was not the last expression of the cell, so it displayed
# nothing and was dead leftover debugging.
y_true_t0 = test_y_data[:, 0, 0]
y_pred_t0 = test_y_data[:, 0, 1]

print(f"y_true @t=0  std={np.nanstd(y_true_t0):.6g}, max={np.nanmax(y_true_t0):.6g}, min={np.nanmin(y_true_t0):.6g}")
print(f"y_pred @t=0  std={np.nanstd(y_pred_t0):.6g}, max={np.nanmax(y_pred_t0):.6g}, min={np.nanmin(y_pred_t0):.6g}")
y_true @t=0  std=0.213643, max=2.66457, min=9.56189e-05
y_pred @t=0  std=0.128761, max=0.734658, min=0.0040167

OHO with parameter search - Custom KAN - all task - data 3 - horizon 1¶

In [ ]:
import os

# Destination for the best Custom-KAN model found by the holdout search.
save_test_OHO_CKAN_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_CKAN_model.pkl"
)

# Hyper-parameter grid for the single-holdout search; widen the search by
# re-enabling the commented entries.
param_grid = {
    "lr": [1e-4, 5e-4],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    #"batch_size": [256, 512],
    #"hidden_layers": [1, 2],
    #"hidden_dim": [16, 32],
    #"knots": [5, 8],
    #"spline_power": [3, 4]
}

t1d3_OHO_CKAN_results, t1d3_OHO_CKAN_nested_results, t1d3_OHO_CKAN_best_model, t1d3_OHO_CKAN_best_params, t1d3_OHO_CKAN_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Custom_KAN",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,

    # specify these for each data
    no_tasks=eur3_y.shape[2],
    flatten=True,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=10,
    epochs=100,
    min_epochs=50,

    # saving model
    # BUG FIX: this previously passed save_test_OHO_SMLP_model_file_path (a
    # different model's path), leaving the CKAN path built above unused and
    # overwriting the SMLP pickle.
    save_model_path=save_test_OHO_CKAN_model_file_path,

    # these go into the parameter grid (defaults used when not searched)
    lr=1e-3,
    dropout=0,
    l2_weight=1e-4,
    batch_size=512,

    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=8,
    spline_power=4,

    # additional parameters for Custom KAN
    warmup_aux_epochs=15,
    joint_epochs=15
)

LSTM KAN¶

Some common helpers¶

In [125]:
from sklearn.base import BaseEstimator, RegressorMixin
import numpy as np
import tensorflow as tf
from tensorflow import keras

class _TeacherForcingScheduler(keras.callbacks.Callback):
    """Linearly anneals the model's teacher-forcing ratio from `start` to
    `end` over `total_epochs`, updating it before each epoch begins."""

    def __init__(self, total_epochs, start=1.0, end=0.0, verbose=0):
        super().__init__()
        self.total_epochs = max(1, int(total_epochs))
        self.start = float(start)
        self.end = float(end)
        self.verbose = int(verbose)

    def on_epoch_begin(self, epoch, logs=None):
        # Fraction of training elapsed: 0 at the first epoch, 1 at the last.
        denom = max(1, self.total_epochs - 1)
        progress = epoch / denom
        ratio = float(np.clip(self.start + progress * (self.end - self.start), 0.0, 1.0))

        # Push the new ratio onto the model, if it exposes these attributes.
        model = self.model
        if hasattr(model, "tf_ratio"):
            model.tf_ratio = ratio
        if hasattr(model, "teacher_forcing"):
            model.teacher_forcing = ratio > 0.0

        if self.verbose:
            state = "ON" if ratio > 0 else "OFF"
            print(f"[TF Scheduler] epoch={epoch+1} tf_ratio={ratio:.3f} -> TF={state}")

LSTM KAN architecture¶

In [126]:
import tensorflow as tf
import keras
from keras import layers
from tkan import TKAN
@keras.saving.register_keras_serializable(package="tkan_seq2seq")
class TKANSeq2Seq(keras.Model):
    """TKAN-based sequence-to-sequence forecaster.

    An encoder of stacked TKAN layers compresses the input window into a
    recurrent state; a TKAN decoder then emits `pred_len` steps, either
    consuming the right-shifted target sequence (vectorized teacher forcing
    during training) or feeding its own predictions back autoregressively.
    The teacher-forcing ratio `tf_ratio` is mutated externally (e.g. by a
    scheduler callback).
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        hidden_layers: int = 2,
        pred_len: int = 28,
        hidden_dim: int = 64,
        dropout: float = 0.0,
        teacher_forcing: bool = True,
        sub_kan_configs=None,
        knots: int = 8,
        spline_power: int = 3,
        name: str = "TKANSeq2Seq",
    ):
        super().__init__(name=name)

        # Default spline configuration for the TKAN sub-layers; `knots` and
        # `spline_power` are only consulted when no explicit config is given.
        if sub_kan_configs is None:
            sub_kan_configs = [{
                "spline_order": spline_power,
                "grid_size": knots,
                "base_activation": "silu",
                "grid_range": (-1.0, 1.0),
            }]

        self.input_dim = int(input_dim)
        self.output_dim = int(output_dim)
        self.pred_len = int(pred_len)
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.teacher_forcing = bool(teacher_forcing)
        self.sub_kan_configs = sub_kan_configs
        # Stored so get_config() can round-trip the full architecture.
        self.hidden_layers = int(hidden_layers)
        self.knots = int(knots)
        self.spline_power = int(spline_power)
        self.loss_tracker = keras.metrics.Mean(name="loss")

        # Teacher-forcing ratio in [0, 1]; updated by the scheduler callback.
        self.tf_ratio = 1.0

        enc_layers = hidden_layers
        dec_layers = hidden_layers

        # Encoder: `hidden_layers` sequence-returning TKAN blocks ...
        self.enc_blocks = []
        for _ in range(enc_layers):
            self.enc_blocks.append(TKAN(
                units=self.hidden_dim,
                return_sequences=True,
                return_state=False,
                dropout=0.0,
                recurrent_dropout=0.0,
                sub_kan_configs=self.sub_kan_configs,
            ))

        # ... plus a final state-returning TKAN whose state seeds the decoder.
        self.enc_last = TKAN(
            units=self.hidden_dim,
            return_sequences=False,
            return_state=True,
            dropout=0.0,
            recurrent_dropout=0.0,
            sub_kan_configs=self.sub_kan_configs,
        )

        self.dec_in_proj = layers.Dense(self.hidden_dim, activation=None, name="dec_in_proj")
        self.dec_layers  = []
        for _ in range(dec_layers):
            self.dec_layers.append(TKAN(
                units=self.hidden_dim,
                return_sequences=True,
                return_state=True,
                dropout=0.0,
                recurrent_dropout=0.0,
                sub_kan_configs=self.sub_kan_configs,
            ))

        self.enc_dropout = layers.Dropout(self.dropout, name="enc_dropout")
        self.dec_dropout = layers.Dropout(self.dropout, name="dec_dropout")
        self.proj        = layers.Dense(self.output_dim, name="out_proj")

        # Learnable start-of-sequence token for the decoder (zero-initialized).
        self.start_token = self.add_weight(
            name="start_token",
            shape=(1, 1, self.output_dim),
            initializer="zeros",
            trainable=True,
        )

    def _encode(self, x, training: bool):
        """Run the encoder stack; returns (last_output, encoder_states)."""
        h = x
        for blk in self.enc_blocks:
            h = blk(h, training=training)
            if training and self.dropout > 0:
                h = self.enc_dropout(h)
        enc_out, *enc_states = self.enc_last(h, training=training)
        return enc_out, enc_states

    def _vectorized_decode(self, y_in_seq, enc_states, training: bool):
        """Teacher-forced decode: consume the whole shifted target sequence."""
        x = self.dec_in_proj(y_in_seq)
        if training and self.dropout > 0:
            x = self.dec_dropout(x)

        for i, blk in enumerate(self.dec_layers):
            # Only the first decoder layer is seeded with the encoder state.
            if i == 0 and enc_states:
                x, *dec_states = blk(x, initial_state=enc_states, training=training)
            else:
                x, *dec_states = blk(x, training=training)
            if training and self.dropout > 0:
                x = self.dec_dropout(x)

        return self.proj(x)

    def _autoregressive_decode(self, enc_states, batch_size, dtype):
        """Step-by-step decode feeding each prediction back as next input.

        NOTE: always runs with training=False (dropout off), including when
        invoked from train_step once the teacher-forcing ratio reaches 0.
        """
        dec_in = tf.tile(tf.cast(self.start_token, dtype), [batch_size, 1, 1])
        # Per-layer recurrent states; only layer 0 starts from the encoder.
        states_per_layer = [enc_states] + [None] * (len(self.dec_layers) - 1)

        preds = []
        for _ in range(self.pred_len):
            x = self.dec_in_proj(tf.squeeze(dec_in, axis=1))
            x = tf.expand_dims(x, axis=1)
            out = x
            new_states = []
            for blk, st in zip(self.dec_layers, states_per_layer):
                if st is not None:
                    out, *st_new = blk(out, initial_state=st, training=False)
                else:
                    out, *st_new = blk(out, training=False)
                new_states.append(st_new)
            states_per_layer = new_states

            y_t = self.proj(tf.squeeze(out, axis=1))
            preds.append(tf.expand_dims(y_t, 1))
            dec_in = tf.expand_dims(y_t, 1)

        return tf.concat(preds, axis=1)

    def call(self, inputs, targets=None, training=False):
        """Forward pass: teacher forcing only when training with targets and
        a positive tf_ratio; otherwise autoregressive decoding."""
        _, enc_states = self._encode(inputs, training=training)
        batch = tf.shape(inputs)[0]
        dtype = inputs.dtype

        use_vectorized = bool(training and self.teacher_forcing and (targets is not None) and (self.tf_ratio > 0.0))

        if use_vectorized:
            start = tf.tile(tf.cast(self.start_token, dtype), [batch, 1, 1])
            # Shift targets right by one step, prepending the start token.
            y_in = tf.concat([start, targets[:, :-1, :]], axis=1)
            return self._vectorized_decode(y_in, enc_states, training=True)
        else:
            return self._autoregressive_decode(enc_states, batch, dtype)

    @property
    def metrics(self):
        # Only the loss tracker is reset between epochs by Keras.
        return [self.loss_tracker]

    def _compiled_metrics_results(self):
        """Best-effort collection of compiled-metric results across Keras versions."""
        cm = getattr(self, "compiled_metrics", None)
        if cm is None:
            return {}
        try:
            return cm.result_dict()
        except Exception:
            # Older API: parallel name/value lists.
            try:
                names = cm.metrics_names
                values = cm.result()
                return {n: v for n, v in zip(names, values)}
            except Exception:
                return {}

    def train_step(self, data):
        x, y, sw = keras.utils.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
            # Pass targets so call() can apply teacher forcing.
            y_pred = self(x, targets=y, training=True)
            loss = self.compute_loss(x, y, y_pred, sw, training=True)

        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))

        self.loss_tracker.update_state(loss)
        self.compute_metrics(x, y, y_pred, sw)

        logs = {"loss": self.loss_tracker.result()}
        logs.update(self._compiled_metrics_results())
        return logs

    def test_step(self, data):
        x, y, sw = keras.utils.unpack_x_y_sample_weight(data)
        # Validation always decodes autoregressively (no targets supplied).
        y_pred = self(x, targets=None, training=False)

        val_loss = self.compute_loss(x, y, y_pred, sw, training=False)

        self.loss_tracker.update_state(val_loss)
        self.compute_metrics(x, y, y_pred, sw)

        logs = {"loss": self.loss_tracker.result()}
        logs.update(self._compiled_metrics_results())
        return logs

    def get_config(self):
        """Serialization config.

        BUG FIX: `hidden_layers` (and the `knots`/`spline_power` convenience
        arguments) were previously omitted, so models rebuilt via from_config
        silently fell back to 2 encoder/decoder layers.
        """
        return {
            "input_dim": self.input_dim,
            "output_dim": self.output_dim,
            "hidden_layers": self.hidden_layers,
            "hidden_dim": self.hidden_dim,
            "pred_len": self.pred_len,
            "dropout": self.dropout,
            "teacher_forcing": self.teacher_forcing,
            "sub_kan_configs": self.sub_kan_configs,
            "knots": self.knots,
            "spline_power": self.spline_power,
        }

LSTM KAN wrapper¶

In [127]:
from sklearn.base import BaseEstimator, RegressorMixin
import numpy as np
import tensorflow as tf
from tensorflow import keras

class TKANSeq2SeqWrapper(BaseEstimator, RegressorMixin):
    """Scikit-learn style wrapper around the TKANSeq2Seq Keras model.

    Builds, trains and evaluates the TKAN encoder/decoder forecaster with a
    configurable loss ("mse", or a Gaussian / Student-t variance NLL) and a
    linearly annealed teacher-forcing schedule.  `score` returns negative MAE
    so that "higher is better" as sklearn expects.
    """

    def __init__(
        self,
        input_dim=10,
        output_dim=1,
        pred_len=28,
        no_tasks=1,
        hidden_layers=6,
        hidden_dim=64,
        dropout=0.2,
        knots=8,
        spline_power=3,
        sub_kan_configs=None,
        teacher_forcing=True,
        tf_ratio_start=1.0,
        tf_ratio_end=0.0,
        lr=1e-3,
        l2_weight=1e-4,
        epochs=100,
        batch_size=32,
        verbose=1,
        patience=10,
        min_delta=1e-4,
        validation_split=0.0,
        seed=42,
        metrics=("mse",),
        run_eagerly=True,
        min_epochs=10,

        # Loss configuration: "mse", "gauss_nll_var", or "student_t_nll_var".
        loss_type="gauss_nll_var",
        target_is_logvar=True,
        nll_eps=1e-12,
        clamp_logvar_min=-20.0,
        clamp_logvar_max=20.0,
        student_df=5.0,
    ):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.pred_len = pred_len
        self.hidden_layers = hidden_layers
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.knots = knots
        self.spline_power = spline_power
        self.sub_kan_configs = sub_kan_configs
        self.teacher_forcing = teacher_forcing
        self.tf_ratio_start = tf_ratio_start
        self.tf_ratio_end = tf_ratio_end
        self.lr = lr
        self.l2_weight = l2_weight
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = verbose
        self.patience = patience
        self.min_delta = min_delta
        self.validation_split = validation_split
        self.seed = seed
        self.metrics = metrics
        self.run_eagerly = run_eagerly
        # NOTE(review): min_epochs is stored and reported by get_params but
        # is not enforced anywhere in fit() -- confirm whether early stopping
        # should honor it.
        self.min_epochs = min_epochs

        self.no_tasks = no_tasks

        # Normalized copies of the loss configuration.
        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)

        # Set by fit(): the built Keras model and its training history.
        self.model_ = None
        self.history_ = None

        dev = "GPU" if tf.config.list_physical_devices("GPU") else "CPU"
        print(f"Using TensorFlow device: {dev}")

    def _make_loss(self):
        """Return the configured loss: plain MSE or a variance-NLL closure."""
        if self.loss_type == "mse":
            return keras.losses.MeanSquaredError()

        def _nll_loss(y_true, y_pred):
            # z: predicted log-variance, clamped for numerical stability.
            z = tf.clip_by_value(y_pred, self.clamp_logvar_min, self.clamp_logvar_max)
            # v: target variance (targets may be stored as log-variance).
            v = tf.exp(y_true) if self.target_is_logvar else y_true
            v = tf.maximum(v, self.nll_eps)

            if self.loss_type == "gauss_nll_var":
                # Gaussian NLL up to additive constants: v/exp(z) + z.
                loss = v * tf.exp(-z) + z
                return tf.reduce_mean(loss)

            if self.loss_type == "student_t_nll_var":
                nu = tf.cast(self.student_df, z.dtype)
                loss = 0.5 * (nu + 1.0) * tf.math.log1p(v / (nu * tf.exp(z))) + 0.5 * z
                return tf.reduce_mean(loss)

            raise ValueError(f"Unknown loss_type: {self.loss_type}")

        return _nll_loss

    def _build(self):
        """Construct, seed and compile a fresh TKANSeq2Seq model."""
        tf.keras.utils.set_random_seed(self.seed)
        model = TKANSeq2Seq(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            hidden_layers=self.hidden_layers,
            pred_len=self.pred_len,
            hidden_dim=self.hidden_dim,
            dropout=self.dropout,
            teacher_forcing=self.teacher_forcing,
            sub_kan_configs=self.sub_kan_configs,
            knots=self.knots,
            spline_power=self.spline_power,
        )
        # Initial teacher-forcing ratio; annealed by the scheduler callback.
        model.tf_ratio = float(self.tf_ratio_start)

        opt = keras.optimizers.AdamW(learning_rate=self.lr, weight_decay=self.l2_weight)

        # Accept both metric names (strings) and callables.
        mets = []
        if self.metrics:
            for m in self.metrics:
                mets.append(m if callable(m) else keras.metrics.get(m))

        model.compile(
            optimizer=opt,
            loss=self._make_loss(),
            metrics=mets,
            run_eagerly=bool(self.run_eagerly),
            jit_compile=False,
        )
        return model


    def fit(self, X, y, X_val=None, y_val=None):
        """Train the model; builds it lazily on first call.

        Uses ReduceLROnPlateau + EarlyStopping (+ TerminateOnNaN), monitoring
        val_loss when validation data/split is available, else the train loss.
        """
        if self.model_ is None:
            self.model_ = self._build()

        X = np.asarray(X, dtype=np.float32)
        y = np.asarray(y, dtype=np.float32)

        use_val = (X_val is not None and y_val is not None) or self.validation_split > 0
        monitor_name = "val_loss" if use_val else "loss"

        callbacks = [
            keras.callbacks.ReduceLROnPlateau(
                monitor=monitor_name, mode="min", factor=0.5, patience=5,
                verbose=bool(self.verbose), min_lr=1e-7,
            ),
            keras.callbacks.EarlyStopping(
                monitor=monitor_name, mode="min", patience=self.patience, min_delta=self.min_delta,
                restore_best_weights=True, verbose=bool(self.verbose),
            ),
            keras.callbacks.TerminateOnNaN(),
        ]

        # Attach the teacher-forcing annealer only if the scheduler class is
        # defined in this session (the try/except tolerates its absence).
        try:
            if self.teacher_forcing and (self.tf_ratio_start != self.tf_ratio_end):
                callbacks.append(
                    _TeacherForcingScheduler(
                        total_epochs=self.epochs,
                        start=self.tf_ratio_start,
                        end=self.tf_ratio_end,
                        verbose=int(self.verbose),
                    )
                )
        except NameError:
            pass

        # Explicit validation data takes precedence over validation_split.
        if X_val is not None and y_val is not None:
            X_val = np.asarray(X_val, dtype=np.float32)
            y_val = np.asarray(y_val, dtype=np.float32)
            val_data = (X_val, y_val)
            val_split = 0.0
        else:
            val_data = None
            val_split = float(self.validation_split)

        self.history_ = self.model_.fit(
            x=X, y=y,
            batch_size=self.batch_size,
            epochs=self.epochs,
            verbose=self.verbose,
            validation_data=val_data,
            validation_split=val_split,
            shuffle=True,
            callbacks=callbacks,
        )
        return self

    def predict(self, X):
        """Predict with the fitted model; raises if fit() was never called."""
        if self.model_ is None:
            raise RuntimeError("Model is not fitted yet.")
        X = np.asarray(X, dtype=np.float32)
        return np.asarray(self.model_.predict(X, batch_size=self.batch_size, verbose=0))

    def score(self, X, y):
        """Return negative MAE (sklearn convention: higher is better)."""
        if self.model_ is None:
            raise RuntimeError("Model is not fitted yet.")
        X = np.asarray(X, dtype=np.float32)
        y = np.asarray(y, dtype=np.float32)
        y_pred = self.model_.predict(X, batch_size=self.batch_size, verbose=0)
        mae = float(np.mean(np.abs(y_pred - y)))
        return -mae

    def get_params(self, deep=True):
        """Return all constructor parameters (sklearn clone/grid-search API)."""
        return {
            "input_dim": self.input_dim,
            "output_dim": self.output_dim,
            "pred_len": self.pred_len,
            "hidden_layers": self.hidden_layers,
            "hidden_dim": self.hidden_dim,
            "dropout": self.dropout,
            "knots": self.knots,
            "spline_power": self.spline_power,
            "sub_kan_configs": self.sub_kan_configs,
            "teacher_forcing": self.teacher_forcing,
            "tf_ratio_start": self.tf_ratio_start,
            "tf_ratio_end": self.tf_ratio_end,
            "lr": self.lr,
            "l2_weight": self.l2_weight,
            "epochs": self.epochs,
            "batch_size": self.batch_size,
            "verbose": self.verbose,
            "patience": self.patience,
            "min_delta": self.min_delta,
            "validation_split": self.validation_split,
            "seed": self.seed,
            "metrics": self.metrics,
            "run_eagerly": self.run_eagerly,
            "no_tasks": self.no_tasks,
            "min_epochs": self.min_epochs,

            "loss_type": self.loss_type,
            "target_is_logvar": self.target_is_logvar,
            "nll_eps": self.nll_eps,
            "clamp_logvar_min": self.clamp_logvar_min,
            "clamp_logvar_max": self.clamp_logvar_max,
            "student_df": self.student_df,
        }

    def set_params(self, **params):
        """Set parameters and discard any fitted model (forces a rebuild)."""
        for k, v in params.items():
            setattr(self, k, v)
        self.model_ = None
        return self

    def __repr__(self):
        return (f"TKANSeq2SeqWrapper(input_dim={self.input_dim}, output_dim={self.output_dim}, "
                f"pred_len={self.pred_len}, hidden_layers={self.hidden_layers}, hidden_dim={self.hidden_dim}, "
                f"dropout={self.dropout}, lr={self.lr}, l2_weight={self.l2_weight}, "
                f"epochs={self.epochs}, batch_size={self.batch_size}, loss_type={self.loss_type}, "
                f"target_is_logvar={self.target_is_logvar})")
Check optimizer¶
In [128]:
import tensorflow as tf
from tensorflow import keras

# Report library versions and confirm the AdamW optimizer is available.
versions = {"TF": tf.__version__, "Keras": keras.__version__}
for label, value in versions.items():
    print(f"{label}: {value}")
print("has AdamW:", hasattr(keras.optimizers, "AdamW"))
TF: 2.20.0
Keras: 3.11.3
has AdamW: True

Test 1 task EURUSD with KAN LSTM without CV¶

In [ ]:
import os
# Output path for the single-task EURUSD KLSTM test model.
save_test_EURUSD_KLSTM_model_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_KLSTM_model_1_task.pkl"
)

# BUG FIX: every other call to train_and_evaluate_model in this notebook
# unpacks FIVE return values (results, nested_results, best_model,
# best_params, y_data); unpacking only four here raises
# "ValueError: too many values to unpack". Discard the y_data slot.
t1_EURUSD_KLSTM_results, t1_EURUSD_KLSTM_nested_results, t1_EURUSD_KLSTM_best_model, t1_EURUSD_KLSTM_best_params, _ = train_and_evaluate_model(
    model_type="LSTM_KAN",
    X_price=eur2_X_price,
    X_time=eur2_X_time,
    y=eur2_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=False,
    merge_price_time=True,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test_EURUSD_KLSTM_model_1_file_path,
    lr=1e-5,
    epochs=10,
    batch_size=32,
    verbose=True,
    hidden_layers=6,
    time_horizon=3
)

Test 3 task EURUSD with KAN LSTM without CV¶

In [185]:
import os
# NOTE(review): the section heading says "3 task" but no_tasks=1 below, and
# this cell reuses both the *_model_1_task.pkl filename and the t1_* result
# variable names from the earlier 1-task cell, so it overwrites the saved
# file and the in-memory results -- confirm this is intentional.
save_test_EURUSD_KLSTM_model_1_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_KLSTM_model_1_task.pkl"
)

t1_EURUSD_KLSTM_results, t1_EURUSD_KLSTM_nested_results, t1_EURUSD_KLSTM_best_model, t1_EURUSD_KLSTM_best_params, _ = train_and_evaluate_model(
    model_type="LSTM_KAN",
    X_price=eur5_X_price,  # data set 5 here (the 1-task cell uses eur2)
    X_time=eur5_X_time,
    y=eur5_y,
    no_tasks=1,
    use_nested_cv=False,
    flatten=False,
    merge_price_time=True,
    normalize_X=True,
    normalize_Time=False,  # pipeline auto-enables this when merging (see run log)
    normalize_y=True,
    save_model_path=save_test_EURUSD_KLSTM_model_1_file_path,
    lr=1e-3,
    epochs=10,
    batch_size=512,
    verbose=True,
    time_horizon=28,
    l2_weight=1e-5,
    dropout=0.0,
    hidden_dim=32,
    hidden_layers=2,
    knots=8,
    spline_power=5,
    target_mode="log_mse",
)
merge_price_time=True with normalize_X=True but normalize_Time=False -> enabling normalize_Time=True for scale alignment.
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 28
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  0.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 28, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.999757059156098
  Min value:  -3.719395912581499
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.40793072075585246
  Min value:  0.00022460106458896233
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.1249999999999998
  Min value:  0.0
Checking y_val (log_mse scaled):
Shape: (302, 28, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3220010936712705
  Min value:  -2.976262445400759
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4442211143925813
  Min value:  -0.00012220230225973657
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.4999999999999996
  Min value:  0.0
Checking y_test (log_mse scaled):
Shape: (757, 28, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.386413592859664
  Min value:  -5.35177290393992
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 27s 4s/step - task0_mae: 0.8754 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 0.0010
[TF Scheduler] epoch=2 tf_ratio=0.889 -> TF=ON
Epoch 2/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 24s 4s/step - task0_mae: 1.0434 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 0.0010
[TF Scheduler] epoch=3 tf_ratio=0.778 -> TF=ON
Epoch 3/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 25s 4s/step - task0_mae: 1.0745 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 0.0010
[TF Scheduler] epoch=4 tf_ratio=0.667 -> TF=ON
Epoch 4/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 24s 4s/step - task0_mae: 1.1223 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 0.0010
[TF Scheduler] epoch=5 tf_ratio=0.556 -> TF=ON
Epoch 5/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 39s 7s/step - task0_mae: 1.1496 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 0.0010
[TF Scheduler] epoch=6 tf_ratio=0.444 -> TF=ON
Epoch 6/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 0s 4s/step - task0_mae: 0.6425 - loss: 0.0000e+00
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
6/6 ━━━━━━━━━━━━━━━━━━━━ 24s 4s/step - task0_mae: 1.2422 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 0.0010
[TF Scheduler] epoch=7 tf_ratio=0.333 -> TF=ON
Epoch 7/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 24s 4s/step - task0_mae: 1.2529 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.222 -> TF=ON
Epoch 8/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 24s 4s/step - task0_mae: 1.2768 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.111 -> TF=ON
Epoch 9/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 24s 4s/step - task0_mae: 1.3005 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.000 -> TF=OFF
Epoch 10/10
6/6 ━━━━━━━━━━━━━━━━━━━━ 28s 5s/step - task0_mae: 1.1675 - loss: 0.0000e+00 - val_loss: 0.0000e+00 - learning_rate: 5.0000e-04
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 15
output_dim: 1
pred_len: 28
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 3
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00100000
l2_weight: 0.00001000
epochs: 10
batch_size: 512
verbose: True
patience: 10
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mae',)
run_eagerly: True
no_tasks: 1
min_epochs: 50
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.12668956
1 day(s) RMSE                      : 0.22896372
1 day(s) R2                        : -0.14856526
1 day(s) Pearson r                 : -0.04227649
1 day(s) QLIKE                     : 0.58526546
3 day(s) MAE                       : 0.12495201
3 day(s) RMSE                      : 0.22610730
3 day(s) R2                        : -0.12872758
3 day(s) Pearson r                 : -0.00310846
3 day(s) QLIKE                     : 0.58486418
5 day(s) MAE                       : 0.12347921
5 day(s) RMSE                      : 0.22334783
5 day(s) R2                        : -0.11386038
5 day(s) Pearson r                 : 0.01339354
5 day(s) QLIKE                     : 0.58250906
10 day(s) MAE                      : 0.12193738
10 day(s) RMSE                     : 0.21978296
10 day(s) R2                       : -0.08888299
10 day(s) Pearson r                : -0.00494824
10 day(s) QLIKE                    : 0.58367020
20 day(s) MAE                      : 0.12049498
20 day(s) RMSE                     : 0.21627614
20 day(s) R2                       : -0.06684808
20 day(s) Pearson r                : 0.00091059
20 day(s) QLIKE                    : 0.57910560
full horizon MAE                   : 0.11990987
full horizon RMSE                  : 0.21490506
full horizon R2                    : -0.05907460
full horizon Pearson r             : -0.00068505
full horizon QLIKE                 : 0.57684500

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_EURUSD_KLSTM_model_1_task.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.108293, max=0.167544

All tasks EURUSD with KAN LSTM without CV¶

In [ ]:
import os

# Destination for the pickled best model of this all-task run.
save_test_EURUSD_KLSTM_model_all_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_KLSTM_model_all_task.pkl"
)

# Train and evaluate the KAN-LSTM on all EURUSD tasks with a plain
# train/test split (use_nested_cv=False). The 5-tuple returned is
# (results, nested_cv_results, best_model, best_params, y_data); the
# y_data element is discarded here.
tall_EURUSD_KLSTM_results, tall_EURUSD_KLSTM_nested_results, tall_EURUSD_KLSTM_best_model, tall_EURUSD_KLSTM_best_params, _ = train_and_evaluate_model(
    model_type="LSTM_KAN",
    X_price=eur2_X_price,
    X_time=eur2_X_time,
    y=eur2_y,
    no_tasks=eur2_y.shape[2],  # number of tasks taken from y's third axis
    use_nested_cv=False,
    flatten=False,
    merge_price_time=True,  # presumably joins price/time inputs — see train_and_evaluate_model
    normalize_X=True,
    normalize_Time=False,  # time features left unscaled in this run
    normalize_y=True,
    save_model_path=save_test_EURUSD_KLSTM_model_all_file_path,
    lr=1e-5,
    epochs=10,
    batch_size=32,
    verbose=True,
    hidden_layers=6,
    time_horizon=3,
    dropout=0.0,  # dropout disabled for this configuration
    l2_weight=8e-5
)

OHO with parameter search - KAN LSTM - all task - data 3 - horizon 1¶

In [196]:
import os

# Destination for the pickled best single-holdout model.
save_test_OHO_LSTM_KAN_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_LSTM_KAN_model.pkl"
)

# Parameter grid for the single-holdout search. Only lr is active;
# the remaining candidates are kept commented out for future sweeps.
# Grid values overwrite the matching keyword arguments passed below
# (the run output confirms this behavior).
param_grid = {
    "lr": [1e-4, 5e-4],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    #"batch_size": [256, 512],
    #"hidden_layers": [1, 2],
    #"hidden_dim": [16, 32],
    #"knots": [5, 8],
    #"spline_power": [3, 4]
}

# Single-holdout (OHO) run: one train/test split, with the param search
# performed on a single train/val early-stopping split (no nested CV).
t1d3_OHO_LSTM_KAN_results, t1d3_OHO_LSTM_KAN_nested_results, t1d3_OHO_LSTM_KAN_best_model, t1d3_OHO_LSTM_KAN_best_params, t1d3_OHO_LSTM_KAN_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="LSTM_KAN",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,
    
    # specify these for each data
    no_tasks=eur3_y.shape[2],  # number of tasks taken from y's third axis
    flatten=False,
    # NOTE(review): the section header says "horizon 1" but this is 28 — confirm
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    # NOTE(review): epochs=2 is below min_epochs=50 — looks like a smoke-test
    # setting; confirm before treating these results as final.
    epochs=2,
    min_epochs=50,

    # saving model
    save_model_path=save_test_OHO_LSTM_KAN_model_file_path,

    # these go into the parameter grid
    lr=1e-3,
    dropout=0,
    l2_weight=1e-4,
    batch_size=512,
    
    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=8,
    spline_power=4
)
model parameter(s) from the grid will overwrite any overlapping parameters provided directly to this function
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 28
Features for y: 8

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
Using TensorFlow device: CPU
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 38s 6s/step - mean_absolute_error: 0.5906 - task0_mae: 1.4416 - loss: 0.4629 - val_loss: 0.5554 - learning_rate: 1.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.000 -> TF=OFF
Epoch 2/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 43s 7s/step - mean_absolute_error: 0.5121 - task0_mae: 1.2399 - loss: 0.4046 - val_loss: 0.4209 - learning_rate: 1.0000e-04
Using TensorFlow device: CPU
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 34s 6s/step - mean_absolute_error: 0.3171 - task0_mae: 0.6151 - loss: 0.1605 - val_loss: 0.1526 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.000 -> TF=OFF
Epoch 2/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 41s 7s/step - mean_absolute_error: 0.1534 - task0_mae: 0.2156 - loss: 0.0450 - val_loss: 0.0399 - learning_rate: 5.0000e-04

Best parameters found in single holdout:
  lr: 0.0005

Results for the best model from single holdout evaluation:

--- Task 1 ---
1 day(s) MAE (log-var)             : 3.93651289
1 day(s) RMSE (log-var)            : 4.14713040
1 day(s) R2 (log-var)              : -8.82663641
1 day(s) MAE (var)                 : 0.19000056
1 day(s) RMSE (var)                : 0.28589766
1 day(s) R2 (var)                  : -0.79079991
1 day(s) QLIKE (var)               : 0.57916934
5 day(s) MAE (log-var)             : 2.55176348
5 day(s) RMSE (log-var)            : 2.85592445
5 day(s) R2 (log-var)              : -3.67925370
5 day(s) MAE (var)                 : 0.17845661
5 day(s) RMSE (var)                : 0.27554818
5 day(s) R2 (var)                  : -0.69537565
5 day(s) QLIKE (var)               : 1.06056685
10 day(s) MAE (log-var)            : 1.94874356
10 day(s) RMSE (log-var)           : 2.29416732
10 day(s) R2 (log-var)             : -2.02725954
10 day(s) MAE (var)                : 0.16535712
10 day(s) RMSE (var)               : 0.26422668
10 day(s) R2 (var)                 : -0.57380208
10 day(s) QLIKE (var)              : 1.27184295
20 day(s) MAE (log-var)            : 1.52568635
20 day(s) RMSE (log-var)           : 1.88862504
20 day(s) R2 (log-var)             : -1.06662496
20 day(s) MAE (var)                : 0.15141187
20 day(s) RMSE (var)               : 0.25169113
20 day(s) R2 (var)                 : -0.44485672
20 day(s) QLIKE (var)              : 1.30833529
full horizon MAE (log-var)         : 1.38949726
full horizon RMSE (log-var)        : 1.74890400
full horizon R2 (log-var)          : -0.77829021
full horizon MAE (var)             : 0.14618016
full horizon RMSE (var)            : 0.24673757
full horizon R2 (var)              : -0.39607093
full horizon QLIKE (var)           : 1.25994395

--- Task 2 ---
1 day(s) MAE (log-var)             : 1.80365233
1 day(s) RMSE (log-var)            : 1.85142712
1 day(s) R2 (log-var)              : -18.46555842
1 day(s) MAE (var)                 : 0.95131805
1 day(s) RMSE (var)                : 1.20654478
1 day(s) R2 (var)                  : -1.64131612
1 day(s) QLIKE (var)               : 0.09891012
5 day(s) MAE (log-var)             : 0.86759052
5 day(s) RMSE (log-var)            : 1.08020955
5 day(s) R2 (log-var)              : -5.68971925
5 day(s) MAE (var)                 : 0.63248601
5 day(s) RMSE (var)                : 0.98264787
5 day(s) R2 (var)                  : -0.76586653
5 day(s) QLIKE (var)               : 0.27586387
10 day(s) MAE (log-var)            : 0.58529117
10 day(s) RMSE (log-var)           : 0.82017658
10 day(s) R2 (log-var)             : -2.85904176
10 day(s) MAE (var)                : 0.48841831
10 day(s) RMSE (var)               : 0.87660320
10 day(s) R2 (var)                 : -0.40756332
10 day(s) QLIKE (var)              : 0.29418165
20 day(s) MAE (log-var)            : 0.44162949
20 day(s) RMSE (log-var)           : 0.65131334
20 day(s) R2 (log-var)             : -1.42378039
20 day(s) MAE (var)                : 0.41383111
20 day(s) RMSE (var)               : 0.81424810
20 day(s) R2 (var)                 : -0.21438597
20 day(s) QLIKE (var)              : 0.24546811
full horizon MAE (log-var)         : 0.40146950
full horizon RMSE (log-var)        : 0.59498486
full horizon R2 (log-var)          : -1.01122624
full horizon MAE (var)             : 0.39369370
full horizon RMSE (var)            : 0.79655456
full horizon R2 (var)              : -0.15852655
full horizon QLIKE (var)           : 0.21697894

--- Task 3 ---
1 day(s) MAE (log-var)             : 0.97092714
1 day(s) RMSE (log-var)            : 1.05502547
1 day(s) R2 (log-var)              : -5.44888903
1 day(s) MAE (var)                 : 0.12374214
1 day(s) RMSE (var)                : 0.14973537
1 day(s) R2 (var)                  : -2.14115990
1 day(s) QLIKE (var)               : 0.08853388
5 day(s) MAE (log-var)             : 0.64576940
5 day(s) RMSE (log-var)            : 0.78171473
5 day(s) R2 (log-var)              : -2.53812826
5 day(s) MAE (var)                 : 0.09683277
5 day(s) RMSE (var)                : 0.12843972
5 day(s) R2 (var)                  : -1.31183482
5 day(s) QLIKE (var)               : 0.11218053
10 day(s) MAE (log-var)            : 0.52067592
10 day(s) RMSE (log-var)           : 0.65917843
10 day(s) R2 (log-var)             : -1.50738010
10 day(s) MAE (var)                : 0.08451465
10 day(s) RMSE (var)               : 0.11679067
10 day(s) R2 (var)                 : -0.90353055
10 day(s) QLIKE (var)              : 0.11757253
20 day(s) MAE (log-var)            : 0.44621262
20 day(s) RMSE (log-var)           : 0.57329767
20 day(s) R2 (log-var)             : -0.90828395
20 day(s) MAE (var)                : 0.07655790
20 day(s) RMSE (var)               : 0.10768740
20 day(s) R2 (var)                 : -0.64486780
20 day(s) QLIKE (var)              : 0.11152702
full horizon MAE (log-var)         : 0.42401668
full horizon RMSE (log-var)        : 0.54497061
full horizon R2 (log-var)          : -0.72917501
full horizon MAE (var)             : 0.07412867
full horizon RMSE (var)            : 0.10462966
full horizon R2 (var)              : -0.56525399
full horizon QLIKE (var)           : 0.10703553

--- Task 4 ---
1 day(s) MAE (log-var)             : 0.03318924
1 day(s) RMSE (log-var)            : 0.05082743
1 day(s) R2 (log-var)              : -0.51433180
1 day(s) MAE (var)                 : 0.03465350
1 day(s) RMSE (var)                : 0.05373536
1 day(s) R2 (var)                  : -0.50271982
1 day(s) QLIKE (var)               : 0.00086767
5 day(s) MAE (log-var)             : 0.03295480
5 day(s) RMSE (log-var)            : 0.04874052
5 day(s) R2 (log-var)              : -0.39112055
5 day(s) MAE (var)                 : 0.03441683
5 day(s) RMSE (var)                : 0.05163753
5 day(s) R2 (var)                  : -0.38632885
5 day(s) QLIKE (var)               : 0.00087492
10 day(s) MAE (log-var)            : 0.03282187
10 day(s) RMSE (log-var)           : 0.04770258
10 day(s) R2 (log-var)             : -0.33053036
10 day(s) MAE (var)                : 0.03428216
10 day(s) RMSE (var)               : 0.05059532
10 day(s) R2 (var)                 : -0.32902886
10 day(s) QLIKE (var)              : 0.00087477
20 day(s) MAE (log-var)            : 0.03275883
20 day(s) RMSE (log-var)           : 0.04728500
20 day(s) R2 (log-var)             : -0.30637260
20 day(s) MAE (var)                : 0.03421827
20 day(s) RMSE (var)               : 0.05017771
20 day(s) R2 (var)                 : -0.30624346
20 day(s) QLIKE (var)              : 0.00087307
full horizon MAE (log-var)         : 0.03273719
full horizon RMSE (log-var)        : 0.04720377
full horizon R2 (log-var)          : -0.30048989
full horizon MAE (var)             : 0.03419606
full horizon RMSE (var)            : 0.05009705
full horizon R2 (var)              : -0.30070398
full horizon QLIKE (var)           : 0.00087322

--- Task 5 ---
1 day(s) MAE (log-var)             : 0.55275843
1 day(s) RMSE (log-var)            : 0.57902098
1 day(s) R2 (log-var)              : -1.48209932
1 day(s) MAE (var)                 : 1.02586376
1 day(s) RMSE (var)                : 1.10287229
1 day(s) R2 (var)                  : -2.00032076
1 day(s) QLIKE (var)               : 0.05634189
5 day(s) MAE (log-var)             : 0.37983284
5 day(s) RMSE (log-var)            : 0.43177457
5 day(s) R2 (log-var)              : -0.37974825
5 day(s) MAE (var)                 : 0.73443925
5 day(s) RMSE (var)                : 0.80693283
5 day(s) R2 (var)                  : -0.60533308
5 day(s) QLIKE (var)               : 0.07214640
10 day(s) MAE (log-var)            : 0.31151573
10 day(s) RMSE (log-var)           : 0.40830946
10 day(s) R2 (log-var)             : -0.23322096
10 day(s) MAE (var)                : 0.59823603
10 day(s) RMSE (var)               : 0.73258637
10 day(s) R2 (var)                 : -0.32209512
10 day(s) QLIKE (var)              : 0.07332714
20 day(s) MAE (log-var)            : 0.27027386
20 day(s) RMSE (log-var)           : 0.40080039
20 day(s) R2 (log-var)             : -0.18780149
20 day(s) MAE (var)                : 0.51304923
20 day(s) RMSE (var)               : 0.70045206
20 day(s) R2 (var)                 : -0.20788526
20 day(s) QLIKE (var)              : 0.06875442
full horizon MAE (log-var)         : 0.25833183
full horizon RMSE (log-var)        : 0.39891947
full horizon R2 (log-var)          : -0.17609724
full horizon MAE (var)             : 0.48833760
full horizon RMSE (var)            : 0.69161317
full horizon R2 (var)              : -0.17669355
full horizon QLIKE (var)           : 0.06616395

--- Task 6 ---
1 day(s) MAE (log-var)             : 0.03596696
1 day(s) RMSE (log-var)            : 0.06391233
1 day(s) R2 (log-var)              : -0.46208614
1 day(s) MAE (var)                 : 0.03801219
1 day(s) RMSE (var)                : 0.06848914
1 day(s) R2 (var)                  : -0.44387652
1 day(s) QLIKE (var)               : 0.00143396
5 day(s) MAE (log-var)             : 0.03351514
5 day(s) RMSE (log-var)            : 0.05947228
5 day(s) R2 (log-var)              : -0.26546364
5 day(s) MAE (var)                 : 0.03556753
5 day(s) RMSE (var)                : 0.06405089
5 day(s) R2 (var)                  : -0.26231253
5 day(s) QLIKE (var)               : 0.00145894
10 day(s) MAE (log-var)            : 0.03341353
10 day(s) RMSE (log-var)           : 0.05792654
10 day(s) R2 (log-var)             : -0.19978405
10 day(s) MAE (var)                : 0.03546976
10 day(s) RMSE (var)               : 0.06249478
10 day(s) R2 (var)                 : -0.20102017
10 day(s) QLIKE (var)              : 0.00145458
20 day(s) MAE (log-var)            : 0.03302684
20 day(s) RMSE (log-var)           : 0.05773608
20 day(s) R2 (log-var)             : -0.19117151
20 day(s) MAE (var)                : 0.03508066
20 day(s) RMSE (var)               : 0.06231513
20 day(s) R2 (var)                 : -0.19344267
20 day(s) QLIKE (var)              : 0.00144681
full horizon MAE (log-var)         : 0.03284114
full horizon RMSE (log-var)        : 0.05785597
full horizon R2 (log-var)          : -0.19526123
full horizon MAE (var)             : 0.03489332
full horizon RMSE (var)            : 0.06244109
full horizon R2 (var)              : -0.19746465
full horizon QLIKE (var)           : 0.00144543

--- Task 7 ---
1 day(s) MAE (log-var)             : 0.04759812
1 day(s) RMSE (log-var)            : 0.04886986
1 day(s) R2 (log-var)              : -19.27078629
1 day(s) MAE (var)                 : 0.04661437
1 day(s) RMSE (var)                : 0.04790358
1 day(s) R2 (var)                  : -18.57648857
1 day(s) QLIKE (var)               : 0.00006117
5 day(s) MAE (log-var)             : 0.02352775
5 day(s) RMSE (log-var)            : 0.02868812
5 day(s) R2 (log-var)              : -6.10328262
5 day(s) MAE (var)                 : 0.02324642
5 day(s) RMSE (var)                : 0.02828649
5 day(s) R2 (var)                  : -5.93874059
5 day(s) QLIKE (var)               : 0.00017135
10 day(s) MAE (log-var)            : 0.01597419
10 day(s) RMSE (log-var)           : 0.02173750
10 day(s) R2 (log-var)             : -3.16167225
10 day(s) MAE (var)                : 0.01583594
10 day(s) RMSE (var)               : 0.02146572
10 day(s) R2 (var)                 : -3.07588998
10 day(s) QLIKE (var)              : 0.00018902
20 day(s) MAE (log-var)            : 0.01289671
20 day(s) RMSE (log-var)           : 0.01786056
20 day(s) R2 (log-var)             : -1.96736162
20 day(s) MAE (var)                : 0.01283933
20 day(s) RMSE (var)               : 0.01769270
20 day(s) R2 (var)                 : -1.92091726
20 day(s) QLIKE (var)              : 0.00016001
full horizon MAE (log-var)         : 0.01213233
full horizon RMSE (log-var)        : 0.01663233
full horizon R2 (log-var)          : -1.68180270
full horizon MAE (var)             : 0.01210145
full horizon RMSE (var)            : 0.01650622
full horizon R2 (var)              : -1.64720697
full horizon QLIKE (var)           : 0.00013741

--- Task 8 ---
1 day(s) MAE (log-var)             : 15.24759175
1 day(s) RMSE (log-var)            : 15.31449974
1 day(s) R2 (log-var)              : -152.48936511
1 day(s) MAE (var)                 : 447.66567997
1 day(s) RMSE (var)                : 1158.67960227
1 day(s) R2 (var)                  : -0.17546504
1 day(s) QLIKE (var)               : 1.27194453
5 day(s) MAE (log-var)             : 21.67547224
5 day(s) RMSE (log-var)            : 22.26225903
5 day(s) R2 (log-var)              : -323.06177861
5 day(s) MAE (var)                 : 446.55337948
5 day(s) RMSE (var)                : 1158.42842366
5 day(s) R2 (var)                  : -0.17453112
5 day(s) QLIKE (var)               : 7.31252967
10 day(s) MAE (log-var)            : 25.53451828
10 day(s) RMSE (log-var)           : 26.09846510
10 day(s) R2 (log-var)             : -443.42283556
10 day(s) MAE (var)                : 445.44172123
10 day(s) RMSE (var)               : 1158.21455403
10 day(s) R2 (var)                 : -0.17358786
10 day(s) QLIKE (var)              : 4.84700475
20 day(s) MAE (log-var)            : 29.70220325
20 day(s) RMSE (log-var)           : 30.25634569
20 day(s) R2 (log-var)             : -596.24563481
20 day(s) MAE (var)                : 443.12924324
20 day(s) RMSE (var)               : 1157.67494551
20 day(s) R2 (var)                 : -0.17166929
20 day(s) QLIKE (var)              : 3.89911597
full horizon MAE (log-var)         : 31.36066033
full horizon RMSE (log-var)        : 31.85186402
full horizon R2 (log-var)          : -661.53959262
full horizon MAE (var)             : 441.90029645
full horizon RMSE (var)            : 1157.38516751
full horizon R2 (var)              : -0.17065623
full horizon QLIKE (var)           : 3.12527219

Best single-holdout model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_OHO_LSTM_KAN_model.pkl

Custom LSTM KAN¶

Custom KAN LSTM architecture¶

In [129]:
import tensorflow as tf
import keras
from keras import layers
from tkan import TKAN

@keras.saving.register_keras_serializable(package="tkan_seq2seq")
class TKANHierarchicalSeq2Seq(keras.Model):
    """Hierarchical multi-task seq2seq forecaster built on TKAN blocks.

    Architecture, as wired in `call`:
      * a stack of TKAN encoder layers summarizes the input sequence and
        the final layer also returns its states to seed the decoders;
      * auxiliary decoders (tasks 2..no_tasks-1) each predict their own
        target sequence;
      * the task-1 decoder consumes the concatenated auxiliary sequences;
      * the task-0 decoder consumes the task-1 sequence concatenated with
        a dense projection of the encoder summary tiled over `pred_len`.

    During training with teacher forcing enabled (and `tf_ratio > 0`), all
    decoders run vectorized over the ground-truth targets; otherwise the
    auxiliary decoders unroll autoregressively one step at a time.

    Fix vs. previous revision: `hidden_layers` is now stored on the
    instance and emitted by `get_config()`, so Keras serialization
    round-trips the architecture. Previously a reloaded model silently
    rebuilt with the default of 2 layers.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        no_tasks: int = 3,
        task_output_dim: int = 1,
        hidden_layers: int = 2,
        pred_len: int = 28,
        hidden_dim: int = 32,
        dropout: float = 0.2,
        teacher_forcing: bool = True,
        sub_kan_configs=None,
        knots: int = 8,
        spline_power: int = 3,
        name: str = "TKANHierarchicalSeq2Seq",
    ):
        super().__init__(name=name)
        # Default single KAN sub-layer config built from knots/spline_power;
        # used only when the caller does not pass sub_kan_configs explicitly.
        if sub_kan_configs is None:
            sub_kan_configs = [{
                "spline_order": spline_power,
                "grid_size": knots,
                "base_activation": "silu",
                "grid_range": (-1.0, 1.0),
            }]

        # The flat output must split evenly across the task heads.
        assert output_dim == no_tasks * task_output_dim
        self.loss_tracker = keras.metrics.Mean(name="loss")

        self.input_dim = int(input_dim)
        self.output_dim = int(output_dim)
        self.no_tasks = int(no_tasks)
        self.task_output_dim = int(task_output_dim)
        self.pred_len = int(pred_len)
        self.hidden_dim = int(hidden_dim)
        self.dropout = float(dropout)
        self.teacher_forcing = bool(teacher_forcing)
        self.sub_kan_configs = sub_kan_configs
        # FIX: persist hidden_layers so get_config() can round-trip it.
        self.hidden_layers = int(hidden_layers)
        # Teacher-forcing ratio in [0, 1]; mutated externally per epoch
        # (see the "[TF Scheduler]" lines in the training logs).
        self.tf_ratio = 1.0

        enc_layers = self.hidden_layers
        dec_layers = self.hidden_layers

        # Encoder: `enc_layers` sequence-to-sequence blocks, then one final
        # block that returns its states for decoder initialization.
        self.enc_blocks = []
        for _ in range(enc_layers):
            self.enc_blocks.append(TKAN(
                units=self.hidden_dim,
                return_sequences=True,
                return_state=False,
                dropout=0.0,
                recurrent_dropout=0.0,
                sub_kan_configs=self.sub_kan_configs,
            ))
        self.enc_last = TKAN(
            units=self.hidden_dim,
            return_sequences=False,
            return_state=True,
            dropout=0.0,
            recurrent_dropout=0.0,
            sub_kan_configs=self.sub_kan_configs,
        )

        # Per-task decoder machinery (input projection, recurrent stack,
        # output projection, optional learnable start token).
        self.dec_in_proj = []
        self.dec_layers = []
        self.out_proj = []
        self.start_tokens = []

        for t in range(self.no_tasks):
            # Decoder input width follows the task's place in the hierarchy:
            # task 0 sees task 1's output + the encoder projection, task 1
            # sees all auxiliary outputs, tasks >= 2 see their own previous
            # step only.
            if t == 0:
                dec_in_dim = 2 * self.task_output_dim
            elif t == 1:
                dec_in_dim = max(1, (self.no_tasks - 2) * self.task_output_dim)
            else:
                dec_in_dim = self.task_output_dim

            self.dec_in_proj.append(layers.Dense(self.hidden_dim, activation=None, name=f"dec{t}_in_proj"))

            task_stack = []
            for _ in range(dec_layers):
                task_stack.append(TKAN(
                    units=self.hidden_dim,
                    return_sequences=True,
                    return_state=True,
                    dropout=0.0,
                    recurrent_dropout=0.0,
                    sub_kan_configs=self.sub_kan_configs,
                ))
            self.dec_layers.append(task_stack)

            self.out_proj.append(layers.Dense(self.task_output_dim, name=f"out_proj_task_{t}"))

            # Learnable start token only for the autoregressive (aux) tasks.
            if t >= 2:
                self.start_tokens.append(
                    self.add_weight(
                        name=f"start_token_task_{t}",
                        shape=(1, 1, dec_in_dim),
                        initializer="zeros",
                        trainable=True,
                    )
                )
            else:
                self.start_tokens.append(None)

        self.enc_dropout = layers.Dropout(self.dropout, name="enc_dropout")
        self.dec_dropout = layers.Dropout(self.dropout, name="dec_dropout")
        # Projects the encoder summary into task-0's input space.
        self.enc_to_task0 = layers.Dense(self.task_output_dim, name="enc_to_task0")

    def _encode(self, x, training: bool):
        """Run the encoder stack; returns (summary, final-layer states)."""
        h = x
        for blk in self.enc_blocks:
            h = blk(h, training=training)
            if training and self.dropout > 0.0:
                h = self.enc_dropout(h)
        enc_out, *enc_states = self.enc_last(h, training=training)
        return enc_out, enc_states

    def _vectorized_decode_task(self, task_idx, y_in_seq, enc_states, training: bool):
        """Teacher-forced decode of a whole target sequence in one pass.

        Only the first recurrent layer is seeded with the encoder states;
        deeper layers start from their default (zero) state.
        """
        x = self.dec_in_proj[task_idx](y_in_seq)
        if training and self.dropout > 0.0:
            x = self.dec_dropout(x)
        out = x
        states = enc_states
        for i, blk in enumerate(self.dec_layers[task_idx]):
            if i == 0 and states:
                out, *states = blk(out, initial_state=states, training=training)
            else:
                out, *states = blk(out, training=training)
            if training and self.dropout > 0.0:
                out = self.dec_dropout(out)
        return self.out_proj[task_idx](out)

    def _autoregressive_decode_aux(self, task_idx, enc_states, batch_size, dtype):
        """Step-by-step decode for an auxiliary task (task_idx >= 2).

        Each step feeds the previous prediction back in; per-layer states
        are threaded through a `tf.while_loop`. The first layer starts from
        the encoder states, deeper layers from zeros.
        """
        assert task_idx >= 2
        if self.start_tokens[task_idx] is not None:
            dec_in = tf.tile(tf.cast(self.start_tokens[task_idx], dtype), [batch_size, 1, 1])
        else:
            dec_in = tf.zeros([batch_size, 1, self.task_output_dim], dtype=dtype)

        num_layers = len(self.dec_layers[task_idx])
        zero_state = tf.nest.map_structure(lambda s: tf.zeros_like(s), enc_states)
        states_per_layer = [enc_states] + [zero_state] * (num_layers - 1)
        ta = tf.TensorArray(dtype=dtype, size=self.pred_len)

        def cond(t, *_):
            return t < self.pred_len

        def body(t, dec_in, states_per_layer, ta):
            x = self.dec_in_proj[task_idx](tf.squeeze(dec_in, axis=1))
            x = tf.expand_dims(x, 1)
            out = x
            new_states = []
            for blk, st in zip(self.dec_layers[task_idx], states_per_layer):
                out, *st_new = blk(out, initial_state=st, training=False)
                new_states.append(st_new)
            y_t = self.out_proj[task_idx](tf.squeeze(out, axis=1))
            ta = ta.write(t, y_t)
            return t + 1, tf.expand_dims(y_t, 1), new_states, ta

        _, _, _, ta = tf.while_loop(
            cond, body,
            loop_vars=[tf.constant(0), dec_in, states_per_layer, ta],
            parallel_iterations=1
        )
        # TensorArray stacks as (time, batch, dim); transpose to batch-major.
        return tf.transpose(ta.stack(), [1, 0, 2])

    def _build_task1_input_seq(self, preds_or_targets_list, dtype):
        # NOTE(review): appears unused — `call` builds task-1 inputs inline.
        # The empty-list branch takes tf.shape of a Python list and would
        # raise if ever reached; confirm before relying on this helper.
        if preds_or_targets_list:
            return tf.concat(preds_or_targets_list, axis=-1)
        return tf.zeros([tf.shape(preds_or_targets_list)[0], self.pred_len, self.task_output_dim], dtype=dtype)

    def call(self, inputs, targets=None, training=False):
        """Forward pass; returns all task outputs concatenated on the last axis.

        Output layout is [task0, task1, task2, ...] — matching the slicing
        convention `targets[:, :, t*task_output_dim:(t+1)*task_output_dim]`.
        """
        enc_out, enc_states = self._encode(inputs, training=training)
        batch = tf.shape(inputs)[0]
        dtype = inputs.dtype
        # Vectorized (teacher-forced) path only when training with targets
        # available and the scheduler has not annealed tf_ratio to zero.
        use_vectorized = bool(training and self.teacher_forcing and (targets is not None) and (self.tf_ratio > 0.0))

        aux_preds = []
        if use_vectorized:
            # Auxiliary tasks: shift ground truth right by one step behind a
            # start token, then decode the whole sequence in one pass.
            for t in range(2, self.no_tasks):
                if self.start_tokens[t] is not None:
                    start = tf.tile(tf.cast(self.start_tokens[t], dtype), [batch, 1, 1])
                else:
                    start = tf.zeros([batch, 1, self.task_output_dim], dtype=dtype)
                s = t * self.task_output_dim
                e = s + self.task_output_dim
                y_gt = targets[:, :, s:e]
                y_in = tf.concat([start, y_gt[:, :-1, :]], axis=1)
                aux_pred_t = self._vectorized_decode_task(t, y_in, enc_states, training=True)
                aux_preds.append(aux_pred_t)
            # Task 1 is teacher-forced on the ground-truth auxiliary targets.
            if self.no_tasks > 2:
                parts = [targets[:, :, tt*self.task_output_dim:(tt+1)*self.task_output_dim]
                         for tt in range(2, self.no_tasks)]
                y_in1 = tf.concat(parts, axis=-1)
            else:
                y_in1 = tf.zeros([batch, self.pred_len, self.task_output_dim], dtype=dtype)
            task1 = self._vectorized_decode_task(1, y_in1, enc_states, training=True)

            # Task 0 is teacher-forced on the ground-truth task-1 sequence
            # plus the tiled encoder projection.
            enc_proj = self.enc_to_task0(enc_out)
            enc_seq = tf.tile(tf.expand_dims(enc_proj, 1), [1, self.pred_len, 1])
            s1 = 1 * self.task_output_dim
            e1 = s1 + self.task_output_dim
            t1_seq = targets[:, :, s1:e1]
            y_in0 = tf.concat([t1_seq, enc_seq], axis=-1)
            task0 = self._vectorized_decode_task(0, y_in0, enc_states, training=True)
        else:
            # Inference path: auxiliary tasks decode autoregressively, then
            # their predictions feed task 1, whose output feeds task 0.
            for t in range(2, self.no_tasks):
                aux_pred_t = self._autoregressive_decode_aux(t, enc_states, batch, dtype)
                aux_preds.append(aux_pred_t)
            if self.no_tasks > 2:
                y_in1 = tf.concat(aux_preds, axis=-1)
            else:
                y_in1 = tf.zeros([batch, self.pred_len, self.task_output_dim], dtype=dtype)
            task1 = self._vectorized_decode_task(1, y_in1, enc_states, training=False)

            enc_proj = self.enc_to_task0(enc_out)
            enc_seq = tf.tile(tf.expand_dims(enc_proj, 1), [1, self.pred_len, 1])
            y_in0 = tf.concat([task1, enc_seq], axis=-1)
            task0 = self._vectorized_decode_task(0, y_in0, enc_states, training=False)

        outs = [task0, task1] + aux_preds
        return tf.concat(outs, axis=-1)

    @property
    def metrics(self):
        # Only the loss tracker is reset between epochs by Keras.
        return [self.loss_tracker]

    def _compiled_metrics_results(self):
        """Best-effort read of compiled-metric results across Keras versions."""
        cm = getattr(self, "compiled_metrics", None)
        if cm is None:
            return {}
        try:
            return cm.result_dict()
        except Exception:
            try:
                names = cm.metrics_names
                values = cm.result()
                return {n: v for n, v in zip(names, values)}
            except Exception:
                return {}

    def train_step(self, data):
        """Custom step so the forward pass receives `targets` for teacher forcing."""
        x, y, sw = keras.utils.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
            y_pred = self(x, targets=y, training=True)
            loss = self.compute_loss(x, y, y_pred, sw, training=True)
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        self.loss_tracker.update_state(loss)
        self.compute_metrics(x, y, y_pred, sw)
        logs = {"loss": self.loss_tracker.result()}
        logs.update(self._compiled_metrics_results())
        return logs

    def test_step(self, data):
        """Evaluation step: no teacher forcing; val loss uses task 0 only."""
        x, y, sw = keras.utils.unpack_x_y_sample_weight(data)
        y_pred = self(x, targets=None, training=False)
        # Validation loss is computed on the primary (task 0) slice only;
        # metrics still see the full multi-task tensors.
        s, e = 0, self.task_output_dim
        val_loss = self.compute_loss(x, y[:, :, s:e], y_pred[:, :, s:e], sw, training=False)
        self.loss_tracker.update_state(val_loss)
        self.compute_metrics(x, y, y_pred, sw)
        logs = {"loss": self.loss_tracker.result()}
        logs.update(self._compiled_metrics_results())
        return logs

    def get_config(self):
        # knots/spline_power need not be serialized: they only seed the
        # default sub_kan_configs, which is itself stored here.
        return {
            "input_dim": self.input_dim,
            "output_dim": self.output_dim,
            "no_tasks": self.no_tasks,
            "task_output_dim": self.task_output_dim,
            "hidden_layers": self.hidden_layers,  # FIX: was missing from config
            "pred_len": self.pred_len,
            "hidden_dim": self.hidden_dim,
            "dropout": self.dropout,
            "teacher_forcing": self.teacher_forcing,
            "sub_kan_configs": self.sub_kan_configs,
        }

Custom KAN LSTM wrapper¶

In [130]:
from sklearn.base import BaseEstimator, RegressorMixin
import numpy as np
import tensorflow as tf
from tensorflow import keras

class TKANHierMTLWrapper(BaseEstimator, RegressorMixin):
    """Scikit-learn style regressor wrapping the TKANHierarchicalSeq2Seq Keras model.

    Exposes fit/predict/score/get_params/set_params so the model can be used
    with sklearn-style parameter-search utilities.  Supports a plain MSE loss
    as well as Gaussian / Student-t negative log-likelihood losses in which
    the network output is interpreted as a log-variance.

    Fix vs. original: `_make_loss` now validates `loss_type` eagerly at build
    time instead of raising from inside the TF loss closure during the first
    graph trace (same ValueError type and message, raised earlier).
    """

    def __init__(
        self,
        input_dim=10,
        no_tasks=3,
        task_output_dim=1,
        pred_len=28,
        hidden_layers=3,
        hidden_dim=32,
        dropout=0.2,
        knots=8,
        spline_power=3,
        sub_kan_configs=None,
        teacher_forcing=True,
        tf_ratio_start=1.0,
        tf_ratio_end=0.0,
        lr=1e-3,
        l2_weight=1e-4,
        epochs=10,
        batch_size=512,
        verbose=1,
        patience=10,
        min_delta=1e-4,
        validation_split=0.0,
        seed=42,
        metrics=("mse",),
        run_eagerly=False,
        output_dim=None,
        min_epochs=10,
        loss_type="mse",
        target_is_logvar=False,
        nll_eps=1e-12,
        clamp_logvar_min=-20.0,
        clamp_logvar_max=20.0,
        student_df=5.0,
    ):
        # Architecture / data-shape parameters.
        self.input_dim = int(input_dim)
        self.no_tasks = int(no_tasks)
        self.task_output_dim = int(task_output_dim)
        self.pred_len = int(pred_len)
        self.hidden_layers = int(hidden_layers)
        self.hidden_dim = int(hidden_dim)
        self.dropout = float(dropout)
        self.knots = int(knots)
        self.spline_power = int(spline_power)
        self.sub_kan_configs = sub_kan_configs
        # Teacher forcing: the ratio is annealed from tf_ratio_start to
        # tf_ratio_end over training by _TeacherForcingScheduler (see fit()).
        self.teacher_forcing = bool(teacher_forcing)
        self.tf_ratio_start = float(tf_ratio_start)
        self.tf_ratio_end = float(tf_ratio_end)
        # Optimization parameters.
        self.lr = float(lr)
        self.l2_weight = float(l2_weight)
        self.epochs = int(epochs)
        self.batch_size = int(batch_size)
        self.verbose = int(verbose)
        self.patience = int(patience)
        self.min_delta = float(min_delta)
        self.validation_split = float(validation_split)
        self.seed = int(seed)
        self.metrics = metrics
        self.run_eagerly = bool(run_eagerly)
        # NOTE(review): min_epochs is stored and reported by get_params but is
        # never enforced by the EarlyStopping callback in fit() — confirm intent.
        self.min_epochs = int(min_epochs)

        # Keep output_dim consistent with no_tasks * task_output_dim; if an
        # inconsistent output_dim is given, task_output_dim is re-derived.
        if output_dim is None:
            self.output_dim = self.no_tasks * self.task_output_dim
        else:
            self.output_dim = int(output_dim)
            if self.output_dim != self.no_tasks * self.task_output_dim:
                self.task_output_dim = int(max(1, self.output_dim // max(1, self.no_tasks)))
                self.output_dim = self.no_tasks * self.task_output_dim

        # Loss configuration (NLL variants treat predictions as log-variance).
        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)

        # Fitted state (sklearn convention: trailing underscore).
        self.model_ = None
        self.history_ = None

        dev = "GPU" if tf.config.list_physical_devices("GPU") else "CPU"
        print(f"Using TensorFlow device: {dev}")

    def _make_loss(self):
        """Return the loss callable selected by ``self.loss_type``.

        Raises:
            ValueError: immediately, if ``loss_type`` is not one of
                "mse", "gauss_nll_var", "student_t_nll_var".
        """
        # Fail fast on a bad configuration instead of raising lazily from
        # inside the loss closure during the first graph trace.
        if self.loss_type not in ("mse", "gauss_nll_var", "student_t_nll_var"):
            raise ValueError(f"Unknown loss_type: {self.loss_type}")

        if self.loss_type == "mse":
            def _mse(y_true, y_pred):
                # Plain MSE averaged over all dimensions.
                return tf.reduce_mean(tf.square(y_true - y_pred))
            return _mse

        def _nll_loss(y_true, y_pred):
            # y_pred is a log-variance for the NLL variants; clamp it for
            # numerical stability before exponentiating.
            z = tf.clip_by_value(y_pred, self.clamp_logvar_min, self.clamp_logvar_max)
            # Targets may themselves be stored as log-variance; convert to
            # variance and floor at nll_eps to avoid log/division blow-ups.
            v = tf.exp(y_true) if self.target_is_logvar else y_true
            v = tf.maximum(v, self.nll_eps)

            if self.loss_type == "gauss_nll_var":
                # Gaussian NLL up to additive constants: v*exp(-z) + z
                return tf.reduce_mean(v * tf.exp(-z) + z)

            # student_t_nll_var — the only remaining option after the eager
            # validation above.
            nu = tf.cast(self.student_df, z.dtype)
            return tf.reduce_mean(0.5 * (nu + 1.0) * tf.math.log1p(v / (nu * tf.exp(z))) + 0.5 * z)

        return _nll_loss

    def _build(self):
        """Construct and compile a fresh TKANHierarchicalSeq2Seq model."""
        tf.keras.utils.set_random_seed(self.seed)
        model = TKANHierarchicalSeq2Seq(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            no_tasks=self.no_tasks,
            task_output_dim=self.task_output_dim,
            hidden_layers=self.hidden_layers,
            pred_len=self.pred_len,
            hidden_dim=self.hidden_dim,
            dropout=self.dropout,
            teacher_forcing=self.teacher_forcing,
            sub_kan_configs=self.sub_kan_configs,
            knots=self.knots,
            spline_power=self.spline_power,
        )
        # Initial teacher-forcing ratio; annealed per-epoch by the scheduler.
        model.tf_ratio = float(self.tf_ratio_start)

        opt = keras.optimizers.AdamW(learning_rate=self.lr, weight_decay=self.l2_weight)

        loss_fn = self._make_loss()
        # Kept as an attribute so the model's custom steps can reach it.
        model._loss_fn = loss_fn

        # Resolve string metric names via keras; callables pass through.
        mets = []
        if self.metrics:
            for m in self.metrics:
                mets.append(m if callable(m) else keras.metrics.get(m))

        model.compile(
            optimizer=opt,
            loss=loss_fn,
            metrics=mets,
            run_eagerly=bool(self.run_eagerly),
            jit_compile=False,
        )
        return model

    def fit(self, X, y, X_val=None, y_val=None):
        """Fit the model on (X, y), optionally with an explicit validation set.

        X is expected as (batch, time, features) and y as
        (batch, pred_len, output_dim); shapes are read from the arrays and the
        model is (re)built whenever they disagree with the stored dimensions.
        """
        X = np.asarray(X, dtype=np.float32)
        y = np.asarray(y, dtype=np.float32)

        desired_input_dim = int(X.shape[2])
        desired_pred_len = int(y.shape[1])
        desired_output_dim = int(y.shape[2])

        # Re-derive the per-task width from the actual target width; fall back
        # to a single task when the width is not divisible by no_tasks.
        if desired_output_dim % max(1, self.no_tasks) != 0:
            self.no_tasks = 1
            self.task_output_dim = desired_output_dim
        else:
            self.task_output_dim = desired_output_dim // max(1, self.no_tasks)

        rebuild_needed = (
            self.model_ is None or
            self.pred_len != desired_pred_len or
            self.output_dim != desired_output_dim or
            self.input_dim != desired_input_dim
        )
        if rebuild_needed:
            self.pred_len = desired_pred_len
            self.output_dim = desired_output_dim
            self.input_dim = desired_input_dim
            self.model_ = self._build()

        # Monitor val_loss only when some form of validation is available.
        use_val = (X_val is not None and y_val is not None) or self.validation_split > 0
        monitor_name = "val_loss" if use_val else "loss"

        callbacks = [
            keras.callbacks.ReduceLROnPlateau(
                monitor=monitor_name, mode="min", factor=0.5, patience=5,
                verbose=bool(self.verbose), min_lr=1e-7,
            ),
            keras.callbacks.EarlyStopping(
                monitor=monitor_name, mode="min", patience=self.patience, min_delta=self.min_delta,
                restore_best_weights=True, verbose=bool(self.verbose),
            ),
            keras.callbacks.TerminateOnNaN(),
        ]
        # Anneal the teacher-forcing ratio only when it actually changes.
        if self.teacher_forcing and (self.tf_ratio_start != self.tf_ratio_end):
            callbacks.append(
                _TeacherForcingScheduler(
                    total_epochs=self.epochs,
                    start=self.tf_ratio_start,
                    end=self.tf_ratio_end,
                    verbose=int(self.verbose),
                )
            )

        # An explicit validation set takes precedence over validation_split.
        if X_val is not None and y_val is not None:
            X_val = np.asarray(X_val, dtype=np.float32)
            y_val = np.asarray(y_val, dtype=np.float32)
            val_data = (X_val, y_val); val_split = 0.0
        else:
            val_data = None; val_split = float(self.validation_split)

        self.history_ = self.model_.fit(
            x=X, y=y,
            batch_size=self.batch_size,
            epochs=self.epochs,
            verbose=self.verbose,
            validation_data=val_data,
            validation_split=val_split,
            shuffle=True,
            callbacks=callbacks,
        )
        return self

    def predict(self, X):
        """Predict with the fitted model; raises if fit() was never called."""
        if self.model_ is None:
            raise RuntimeError("Model is not fitted yet.")
        X = np.asarray(X, dtype=np.float32)
        return np.asarray(self.model_.predict(X, batch_size=self.batch_size, verbose=0))

    def score(self, X, y):
        """Return negative MAE (higher is better, per sklearn convention)."""
        if self.model_ is None:
            raise RuntimeError("Model is not fitted yet.")
        X = np.asarray(X, dtype=np.float32)
        y = np.asarray(y, dtype=np.float32)
        y_pred = self.model_.predict(X, batch_size=self.batch_size, verbose=0)
        mae = float(np.mean(np.abs(y_pred - y)))
        return -mae

    def get_params(self, deep=True):
        """Return all constructor parameters (sklearn API)."""
        return {
            "input_dim": self.input_dim,
            "no_tasks": self.no_tasks,
            "task_output_dim": self.task_output_dim,
            "pred_len": self.pred_len,
            "hidden_layers": self.hidden_layers,
            "hidden_dim": self.hidden_dim,
            "dropout": self.dropout,
            "knots": self.knots,
            "spline_power": self.spline_power,
            "sub_kan_configs": self.sub_kan_configs,
            "teacher_forcing": self.teacher_forcing,
            "tf_ratio_start": self.tf_ratio_start,
            "tf_ratio_end": self.tf_ratio_end,
            "lr": self.lr,
            "l2_weight": self.l2_weight,
            "epochs": self.epochs,
            "batch_size": self.batch_size,
            "verbose": self.verbose,
            "patience": self.patience,
            "min_delta": self.min_delta,
            "validation_split": self.validation_split,
            "seed": self.seed,
            "metrics": self.metrics,
            "run_eagerly": self.run_eagerly,
            "min_epochs": self.min_epochs,
            "output_dim": self.output_dim,
            "loss_type": self.loss_type,
            "target_is_logvar": self.target_is_logvar,
            "nll_eps": self.nll_eps,
            "clamp_logvar_min": self.clamp_logvar_min,
            "clamp_logvar_max": self.clamp_logvar_max,
            "student_df": self.student_df,
        }

    def set_params(self, **params):
        """Set parameters (sklearn API); invalidates any fitted model."""
        for k, v in params.items():
            setattr(self, k, v)
        # Keep the dimension triple mutually consistent after updates.
        if "output_dim" in params or "no_tasks" in params or "task_output_dim" in params:
            if self.output_dim != self.no_tasks * self.task_output_dim:
                self.task_output_dim = int(max(1, self.output_dim // max(1, self.no_tasks)))
                self.output_dim = self.no_tasks * self.task_output_dim
        # Any parameter change forces a rebuild on the next fit().
        self.model_ = None
        return self

    def __repr__(self):
        return (
            f"TKANHierMTLWrapper(input_dim={self.input_dim}, no_tasks={self.no_tasks}, "
            f"task_output_dim={self.task_output_dim}, pred_len={self.pred_len}, hidden_layers={self.hidden_layers}, "
            f"hidden_dim={self.hidden_dim}, dropout={self.dropout}, lr={self.lr}, l2_weight={self.l2_weight}, "
            f"epochs={self.epochs}, batch_size={self.batch_size}, loss_type={self.loss_type}, "
            f"target_is_logvar={self.target_is_logvar})"
        )

All tasks 3 EURUSD task with Custom KAN LSTM without CV¶

In [ ]:
import os
# Destination for the pickled best model of this run.
save_test_EURUSD_KCLSTM_model_all_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_EURUSD_KCLSTM_model_all_task.pkl"
)

# Train the Custom KAN LSTM on all EURUSD tasks with a single train/test
# split (no nested CV); eur7_* arrays come from the data-preparation cells.
tall_EURUSD_KCLSTM_results, tall_EURUSD_KCLSTM_nested_results, tall_EURUSD_KCLSTM_best_model, tall_EURUSD_KCLSTM_best_params, _ = train_and_evaluate_model(
    model_type="Custom_KAN_LSTM",
    X_price=eur7_X_price,
    X_time=eur7_X_time,
    y=eur7_y,
    no_tasks=eur7_y.shape[2],
    use_nested_cv=False,
    flatten=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    save_model_path=save_test_EURUSD_KCLSTM_model_all_file_path,
    lr=5e-4,
    epochs=30,
    batch_size=16,
    verbose=True,
    time_horizon=1,
    hidden_layers=2,
    l2_weight=1e-5,
    dropout=0,
    hidden_dim=64,
    knots=8,
    spline_power=5,
    min_delta=1e-3,
    min_epochs=10,
    patience=10,
    target_mode="log_mse"
)

OHO with parameter search - Custom KAN LSTM - all task - data 3 - horizon 1¶

In [201]:
import os

# Destination for the pickled best model of this run.
save_test_OHO_CLSTM_KAN_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_OHO_CLSTM_KAN_model.pkl"
)

param_grid = {
    "lr": [1e-4, 5e-4],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    #"batch_size": [256, 512],
    #"hidden_layers": [1, 2],
    #"hidden_dim": [16, 32],
    #"knots": [5, 8],
    #"spline_power": [3, 4]
}

t1d3_OHO_CLSTM_KAN_results, t1d3_OHO_CLSTM_KAN_nested_results, t1d3_OHO_CLSTM_KAN_best_model, t1d3_OHO_CLSTM_KAN_best_params, t1d3_OHO_CLSTM_KAN_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Custom_KAN_LSTM",

    # data for each scenario
    X_price=eur3_X_price,
    X_time=eur3_X_time,
    y=eur3_y,

    # specify these for each data
    no_tasks=eur3_y.shape[2],
    flatten=False,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    epochs = 2,
    min_epochs=50,

    # saving model
    # BUG FIX: the original passed save_test_OHO_LSTM_KAN_model_file_path,
    # a name NOT defined in this cell (leftover kernel state from another
    # cell), so the model was saved to the wrong file. Use the path built
    # above in this cell.
    save_model_path=save_test_OHO_CLSTM_KAN_model_file_path,

    # these go into the parameter grid
    lr=1e-3,
    dropout=0,
    l2_weight=1e-4,
    batch_size=512,

    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=8,
    spline_power=4
)
model parameter(s) from the grid will overwrite any overlapping parameters provided directly to this function
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 28
Features for y: 8

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
Using TensorFlow device: CPU
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 182s 26s/step - mean_absolute_error: 0.3246 - task0_mae: 0.6071 - task1_mae: 0.1114 - task2_mae: 0.0997 - task3_mae: 0.1119 - task4_mae: 1.1774 - task5_mae: 0.1263 - task6_mae: 0.3181 - task7_mae: 0.0453 - loss: 0.2243 - val_loss: 0.2467 - learning_rate: 1.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.000 -> TF=OFF
Epoch 2/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 28s 5s/step - mean_absolute_error: 0.2464 - task0_mae: 0.4439 - task1_mae: 0.1136 - task2_mae: 0.2011 - task3_mae: 0.1285 - task4_mae: 0.8715 - task5_mae: 0.0374 - task6_mae: 0.1335 - task7_mae: 0.0418 - loss: 0.1154 - val_loss: 0.1386 - learning_rate: 1.0000e-04
Using TensorFlow device: CPU
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 191s 27s/step - mean_absolute_error: 0.1727 - task0_mae: 0.1310 - task1_mae: 0.1124 - task2_mae: 0.3709 - task3_mae: 0.1350 - task4_mae: 0.1279 - task5_mae: 0.0564 - task6_mae: 0.3823 - task7_mae: 0.0656 - loss: 0.0466 - val_loss: 0.0582 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.000 -> TF=OFF
Epoch 2/2
6/6 ━━━━━━━━━━━━━━━━━━━━ 25s 4s/step - mean_absolute_error: 0.1299 - task0_mae: 0.1180 - task1_mae: 0.0899 - task2_mae: 0.1134 - task3_mae: 0.1117 - task4_mae: 0.1285 - task5_mae: 0.1273 - task6_mae: 0.3019 - task7_mae: 0.0482 - loss: 0.0451 - val_loss: 0.0317 - learning_rate: 5.0000e-04

Best parameters found in single holdout:
  lr: 0.0005

Results for the best model from single holdout evaluation:

--- Task 1 ---
1 day(s) MAE (log-var)             : 2.68392154
1 day(s) RMSE (log-var)            : 2.89003553
1 day(s) R2 (log-var)              : -3.77216960
1 day(s) MAE (var)                 : 0.18449505
1 day(s) RMSE (var)                : 0.28185531
1 day(s) R2 (var)                  : -0.74051713
1 day(s) QLIKE (var)               : 0.57941347
5 day(s) MAE (log-var)             : 1.40092186
5 day(s) RMSE (log-var)            : 1.81345628
5 day(s) R2 (log-var)              : -0.88667608
5 day(s) MAE (var)                 : 0.14422600
5 day(s) RMSE (var)                : 0.24300331
5 day(s) R2 (var)                  : -0.31854578
5 day(s) QLIKE (var)               : 1.26226930
10 day(s) MAE (log-var)            : 1.23373676
10 day(s) RMSE (log-var)           : 1.69352301
10 day(s) R2 (log-var)             : -0.64961152
10 day(s) MAE (var)                : 0.14910073
10 day(s) RMSE (var)               : 0.23191342
10 day(s) R2 (var)                 : -0.21240731
10 day(s) QLIKE (var)              : 1.40578805
20 day(s) MAE (log-var)            : 1.22618712
20 day(s) RMSE (log-var)           : 1.69946145
20 day(s) R2 (log-var)             : -0.67337324
20 day(s) MAE (var)                : 0.17361695
20 day(s) RMSE (var)               : 0.23995094
20 day(s) R2 (var)                 : -0.31320911
20 day(s) QLIKE (var)              : 1.35271367
full horizon MAE (log-var)         : 1.23733805
full horizon RMSE (log-var)        : 1.71275718
full horizon R2 (log-var)          : -0.70554149
full horizon MAE (var)             : 0.18502283
full horizon RMSE (var)            : 0.24558428
full horizon R2 (var)              : -0.38305052
full horizon QLIKE (var)           : 1.27434934

--- Task 2 ---
1 day(s) MAE (log-var)             : 1.49976941
1 day(s) RMSE (log-var)            : 1.55775678
1 day(s) R2 (log-var)              : -12.78011770
1 day(s) MAE (var)                 : 0.89188969
1 day(s) RMSE (var)                : 1.16067724
1 day(s) R2 (var)                  : -1.44431081
1 day(s) QLIKE (var)               : 0.10216207
5 day(s) MAE (log-var)             : 0.73904009
5 day(s) RMSE (log-var)            : 0.92530447
5 day(s) R2 (log-var)              : -3.90864005
5 day(s) MAE (var)                 : 0.58696845
5 day(s) RMSE (var)                : 0.94750572
5 day(s) R2 (var)                  : -0.64182067
5 day(s) QLIKE (var)               : 0.21971033
10 day(s) MAE (log-var)            : 0.53852635
10 day(s) RMSE (log-var)           : 0.72906182
10 day(s) R2 (log-var)             : -2.04925304
10 day(s) MAE (var)                : 0.48107652
10 day(s) RMSE (var)               : 0.87044439
10 day(s) R2 (var)                 : -0.38785437
10 day(s) QLIKE (var)              : 0.20781071
20 day(s) MAE (log-var)            : 0.42913278
20 day(s) RMSE (log-var)           : 0.60130404
20 day(s) R2 (log-var)             : -1.06586319
20 day(s) MAE (var)                : 0.42024657
20 day(s) RMSE (var)               : 0.82278062
20 day(s) R2 (var)                 : -0.23997045
20 day(s) QLIKE (var)              : 0.17457075
full horizon MAE (log-var)         : 0.39829941
full horizon RMSE (log-var)        : 0.55990279
full horizon R2 (log-var)          : -0.78104281
full horizon MAE (var)             : 0.40363663
full horizon RMSE (var)            : 0.80955204
full horizon R2 (var)              : -0.19664264
full horizon QLIKE (var)           : 0.15863610

--- Task 3 ---
1 day(s) MAE (log-var)             : 0.63407002
1 day(s) RMSE (log-var)            : 0.75159483
1 day(s) R2 (log-var)              : -2.27285367
1 day(s) MAE (var)                 : 0.09747066
1 day(s) RMSE (var)                : 0.12830638
1 day(s) R2 (var)                  : -1.30641666
1 day(s) QLIKE (var)               : 0.08576727
5 day(s) MAE (log-var)             : 0.44778890
5 day(s) RMSE (log-var)            : 0.54231695
5 day(s) R2 (log-var)              : -0.70287721
5 day(s) MAE (var)                 : 0.08036708
5 day(s) RMSE (var)                : 0.10140491
5 day(s) R2 (var)                  : -0.44104002
5 day(s) QLIKE (var)               : 0.15289800
10 day(s) MAE (log-var)            : 0.49817380
10 day(s) RMSE (log-var)           : 0.59310579
10 day(s) R2 (log-var)             : -1.02991813
10 day(s) MAE (var)                : 0.09642817
10 day(s) RMSE (var)               : 0.11417958
10 day(s) R2 (var)                 : -0.81936755
10 day(s) QLIKE (var)              : 0.16426887
20 day(s) MAE (log-var)            : 0.52944955
20 day(s) RMSE (log-var)           : 0.62538758
20 day(s) R2 (log-var)             : -1.27081181
20 day(s) MAE (var)                : 0.10607202
20 day(s) RMSE (var)               : 0.12216754
20 day(s) R2 (var)                 : -1.11696117
20 day(s) QLIKE (var)              : 0.14315402
full horizon MAE (log-var)         : 0.53808112
full horizon RMSE (log-var)        : 0.63415987
full horizon R2 (log-var)          : -1.34147919
full horizon MAE (var)             : 0.10872219
full horizon RMSE (var)            : 0.12425843
full horizon R2 (var)              : -1.20763318
full horizon QLIKE (var)           : 0.13151890

--- Task 4 ---
1 day(s) MAE (log-var)             : 0.03286585
1 day(s) RMSE (log-var)            : 0.04834228
1 day(s) R2 (log-var)              : -0.36986897
1 day(s) MAE (var)                 : 0.03432793
1 day(s) RMSE (var)                : 0.05124296
1 day(s) R2 (var)                  : -0.36655219
1 day(s) QLIKE (var)               : 0.00086131
5 day(s) MAE (log-var)             : 0.03295668
5 day(s) RMSE (log-var)            : 0.04681986
5 day(s) R2 (log-var)              : -0.28364452
5 day(s) MAE (var)                 : 0.03441939
5 day(s) RMSE (var)                : 0.04969088
5 day(s) R2 (var)                  : -0.28377441
5 day(s) QLIKE (var)               : 0.00087816
10 day(s) MAE (log-var)            : 0.03305270
10 day(s) RMSE (log-var)           : 0.04778992
10 day(s) R2 (log-var)             : -0.33540691
10 day(s) MAE (var)                : 0.03451335
10 day(s) RMSE (var)               : 0.05065884
10 day(s) R2 (var)                 : -0.33236820
10 day(s) QLIKE (var)              : 0.00087934
20 day(s) MAE (log-var)            : 0.03323816
20 day(s) RMSE (log-var)           : 0.04926447
20 day(s) R2 (log-var)             : -0.41803812
20 day(s) MAE (var)                : 0.03469864
20 day(s) RMSE (var)               : 0.05214531
20 day(s) R2 (var)                 : -0.41069451
20 day(s) QLIKE (var)              : 0.00088549
full horizon MAE (log-var)         : 0.03326666
full horizon RMSE (log-var)        : 0.04962742
full horizon R2 (log-var)          : -0.43746401
full horizon MAE (var)             : 0.03472675
full horizon RMSE (var)            : 0.05251193
full horizon R2 (var)              : -0.42912474
full horizon QLIKE (var)           : 0.00088601

--- Task 5 ---
1 day(s) MAE (log-var)             : 0.61014020
1 day(s) RMSE (log-var)            : 0.65341802
1 day(s) R2 (log-var)              : -2.16091462
1 day(s) MAE (var)                 : 1.10116269
1 day(s) RMSE (var)                : 1.20438065
1 day(s) R2 (var)                  : -2.57803657
1 day(s) QLIKE (var)               : 0.05631448
5 day(s) MAE (log-var)             : 0.39431164
5 day(s) RMSE (log-var)            : 0.46457062
5 day(s) R2 (log-var)              : -0.59731000
5 day(s) MAE (var)                 : 0.74949987
5 day(s) RMSE (var)                : 0.85836889
5 day(s) R2 (var)                  : -0.81651224
5 day(s) QLIKE (var)               : 0.08340053
10 day(s) MAE (log-var)            : 0.31517200
10 day(s) RMSE (log-var)           : 0.44616248
10 day(s) R2 (log-var)             : -0.47247559
10 day(s) MAE (var)                : 0.59856108
10 day(s) RMSE (var)               : 0.80750781
10 day(s) R2 (var)                 : -0.60634378
10 day(s) QLIKE (var)              : 0.09106712
20 day(s) MAE (log-var)            : 0.30372081
20 day(s) RMSE (log-var)           : 0.45819954
20 day(s) R2 (log-var)             : -0.55237589
20 day(s) MAE (var)                : 0.60194535
20 day(s) RMSE (var)               : 0.84437492
20 day(s) R2 (var)                 : -0.75525207
20 day(s) QLIKE (var)              : 0.08539854
full horizon MAE (log-var)         : 0.30480557
full horizon RMSE (log-var)        : 0.46442322
full horizon R2 (log-var)          : -0.59404514
full horizon MAE (var)             : 0.61554893
full horizon RMSE (var)            : 0.86381763
full horizon R2 (var)              : -0.83561242
full horizon QLIKE (var)           : 0.08036330

--- Task 6 ---
1 day(s) MAE (log-var)             : 0.03225871
1 day(s) RMSE (log-var)            : 0.05770228
1 day(s) R2 (log-var)              : -0.19176217
1 day(s) MAE (var)                 : 0.03430740
1 day(s) RMSE (var)                : 0.06228860
1 day(s) R2 (var)                  : -0.19427347
1 day(s) QLIKE (var)               : 0.00142748
5 day(s) MAE (log-var)             : 0.03689556
5 day(s) RMSE (log-var)            : 0.05567842
5 day(s) R2 (log-var)              : -0.10916008
5 day(s) MAE (var)                 : 0.03906877
5 day(s) RMSE (var)                : 0.06004638
5 day(s) R2 (var)                  : -0.10940534
5 day(s) QLIKE (var)               : 0.00152487
10 day(s) MAE (log-var)            : 0.04362043
10 day(s) RMSE (log-var)           : 0.05692159
10 day(s) R2 (log-var)             : -0.15851584
10 day(s) MAE (var)                : 0.04607867
10 day(s) RMSE (var)               : 0.06104370
10 day(s) R2 (var)                 : -0.14589429
10 day(s) QLIKE (var)              : 0.00165412
20 day(s) MAE (log-var)            : 0.05167693
20 day(s) RMSE (log-var)           : 0.06099680
20 day(s) R2 (log-var)             : -0.32951652
20 day(s) MAE (var)                : 0.05456920
20 day(s) RMSE (var)               : 0.06502317
20 day(s) R2 (var)                 : -0.29942378
20 day(s) QLIKE (var)              : 0.00175218
full horizon MAE (log-var)         : 0.05446669
full horizon RMSE (log-var)        : 0.06258972
full horizon R2 (log-var)          : -0.39885434
full horizon MAE (var)             : 0.05751674
full horizon RMSE (var)            : 0.06660715
full horizon R2 (var)              : -0.36258460
full horizon QLIKE (var)           : 0.00175172

--- Task 7 ---
1 day(s) MAE (log-var)             : 0.04744608
1 day(s) RMSE (log-var)            : 0.04865661
1 day(s) R2 (log-var)              : -19.09426421
1 day(s) MAE (var)                 : 0.04646888
1 day(s) RMSE (var)                : 0.04769835
1 day(s) R2 (var)                  : -18.40910950
1 day(s) QLIKE (var)               : 0.00005801
5 day(s) MAE (log-var)             : 0.02263539
5 day(s) RMSE (log-var)            : 0.02813074
5 day(s) R2 (log-var)              : -5.82994363
5 day(s) MAE (var)                 : 0.02236589
5 day(s) RMSE (var)                : 0.02773187
5 day(s) R2 (var)                  : -5.66930994
5 day(s) QLIKE (var)               : 0.00018393
10 day(s) MAE (log-var)            : 0.01567987
10 day(s) RMSE (log-var)           : 0.02151366
10 day(s) R2 (log-var)             : -3.07640772
10 day(s) MAE (var)                : 0.01554860
10 day(s) RMSE (var)               : 0.02124548
10 day(s) R2 (var)                 : -2.99267876
10 day(s) QLIKE (var)              : 0.00019731
20 day(s) MAE (log-var)            : 0.01349725
20 day(s) RMSE (log-var)           : 0.01830391
20 day(s) R2 (log-var)             : -2.11650883
20 day(s) MAE (var)                : 0.01345188
20 day(s) RMSE (var)               : 0.01815117
20 day(s) R2 (var)                 : -2.07425685
20 day(s) QLIKE (var)              : 0.00016813
full horizon MAE (log-var)         : 0.01313063
full horizon RMSE (log-var)        : 0.01742568
full horizon R2 (log-var)          : -1.94374735
full horizon MAE (var)             : 0.01311558
full horizon RMSE (var)            : 0.01732071
full horizon R2 (var)              : -1.91490228
full horizon QLIKE (var)           : 0.00014496

--- Task 8 ---
1 day(s) MAE (log-var)             : 4.39056004
1 day(s) RMSE (log-var)            : 5.11053478
1 day(s) R2 (log-var)              : -16.09249091
1 day(s) MAE (var)                 : 151237.71190155
1 day(s) RMSE (var)                : 709930.53809119
1 day(s) R2 (var)                  : -441279.47368114
1 day(s) QLIKE (var)               : 3.03231370
5 day(s) MAE (log-var)             : 6.36474624
5 day(s) RMSE (log-var)            : 8.06197646
5 day(s) R2 (log-var)              : -41.49838232
5 day(s) MAE (var)                 : 415981889109.44134521
5 day(s) RMSE (var)                : 13783819208236.89062500
5 day(s) R2 (var)                  : -166289663855899049984.00000000
5 day(s) QLIKE (var)               : 20.96723486
10 day(s) MAE (log-var)            : 8.94181164
10 day(s) RMSE (log-var)           : 11.77921956
10 day(s) R2 (log-var)             : -89.53144808
10 day(s) MAE (var)                : 946233916984.50512695
10 day(s) RMSE (var)               : 49298950099814.54687500
10 day(s) R2 (var)                 : -2126243086793607741440.00000000
10 day(s) QLIKE (var)              : 26.60596695
20 day(s) MAE (log-var)            : 13.84901662
20 day(s) RMSE (log-var)           : 17.79384013
20 day(s) R2 (log-var)             : -205.56626927
20 day(s) MAE (var)                : 478929888948.30700684
20 day(s) RMSE (var)               : 34866086521225.21093750
20 day(s) R2 (var)                 : -1062767301019427930112.00000000
20 day(s) QLIKE (var)              : 21.51755678
full horizon MAE (log-var)         : 16.31740725
full horizon RMSE (log-var)        : 20.32954844
full horizon R2 (log-var)          : -268.89671259
full horizon MAE (var)             : 342931639823.38146973
full horizon RMSE (var)            : 29467473182178.47656250
full horizon R2 (var)              : -758855959331431251968.00000000
full horizon QLIKE (var)           : 18.76747863

Best single-holdout model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_OHO_LSTM_KAN_model.pkl

Benchmark models¶

TimesNet¶

TimesNet wrapper¶

In [131]:
import os
from types import SimpleNamespace
from time import perf_counter
import numpy as np
import torch
import torch.nn as nn
from models.TimesNet import Model as TimesNetModel

class TimesNetWrapper:
    """Scikit-learn-style wrapper around the TimesNet forecaster.

    Consumes windows shaped (batch, input_len, input_dim) and predicts
    (batch, output_len, output_dim) sequences.  Adds: device selection,
    an optional linear head when input_dim != output_dim, MSE or
    variance-NLL objectives for volatility targets, early stopping with
    a minimum-epoch guard, and optional checkpoint save/resume.
    """

    def __init__(
        self,
        input_len, output_len, input_dim, output_dim, no_tasks,
        device=None, epochs=10, batch_size=16, verbose=False, lr=1e-3,
        checkpoint_path=None, l2_weight=1e-5, patience=10, min_epochs=50,
        min_delta=1e-4, dropout=0.0,
        hidden_dim=None,
        hidden_layers=None,
        d_model=None,
        d_ff=None,
        n_heads=None,
        e_layers=None,
        factor=2, top_k=3, num_kernels=3, individual=False,
        moving_avg=25, embed="timeF", freq="d", activation="gelu",
        output_attention=False, distil=True, mix=True,
        task_name="long_term_forecast", features="MS",
        loss_type="gauss_nll_var",
        target_is_logvar=True,
        nll_eps=1e-12,
        clamp_logvar_min=-20.0,
        clamp_logvar_max=20.0,
        student_df=5.0,
    ):
        """Store hyper-parameters, resolve architecture defaults, build model.

        ``hidden_dim``/``hidden_layers`` are generic tuning knobs mapped onto
        the TimesNet-specific sizes (d_model/d_ff, n_heads/e_layers) whenever
        the specific parameters are not given explicitly.  ``loss_type`` is
        one of "mse", "gauss_nll_var" or "student_t_nll_var"; for the NLL
        losses the network output is interpreted as a log-variance.
        """
        # Window/feature geometry.
        self.input_len  = input_len
        self.output_len = output_len
        self.input_dim  = input_dim
        self.output_dim = output_dim
        self.no_tasks   = no_tasks
        # Device preference when none is given: Apple MPS, then CUDA, then CPU.
        self.device = device or ("mps" if torch.backends.mps.is_available()
                                 else ("cuda" if torch.cuda.is_available() else "cpu"))
        print(f"[TimesNetWrapper] Using device: {self.device}")
        # Optimisation / early-stopping settings.
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = verbose
        self.lr = lr
        self.checkpoint_path = checkpoint_path
        self.l2_weight = l2_weight
        self.patience = patience
        self.min_epochs = min_epochs
        self.min_delta = min_delta
        self.dropout = dropout
        # Fallback architecture sizes used when nothing else is supplied.
        default_d_model  = 64
        default_d_ff     = 256
        default_n_heads  = 4
        default_e_layers = 2
        # Fold the generic hidden_dim/hidden_layers knobs into the
        # TimesNet-specific sizes unless those were set explicitly.
        if d_model is None and hidden_dim is not None:
            d_model = hidden_dim
        if d_ff is None and hidden_dim is not None:
            d_ff = max(4 * (d_model if d_model is not None else default_d_model),
                       hidden_dim)
        if n_heads is None and hidden_layers is not None:
            n_heads = max(1, hidden_layers)
        if e_layers is None and hidden_layers is not None:
            e_layers = max(1, hidden_layers)
        self.d_model  = d_model  if d_model  is not None else default_d_model
        self.d_ff     = d_ff     if d_ff     is not None else default_d_ff
        self.n_heads  = n_heads  if n_heads  is not None else default_n_heads
        self.e_layers = e_layers if e_layers is not None else default_e_layers
        # Multi-head attention needs d_model % n_heads == 0; shrink n_heads
        # to the largest divisor of d_model when the constraint is violated.
        if self.d_model % self.n_heads != 0:
            for h in range(min(self.n_heads, self.d_model), 0, -1):
                if self.d_model % h == 0:
                    self.n_heads = h
                    break
            if self.verbose:
                print(f"[TimesNetWrapper] Adjusted n_heads to {self.n_heads} so d_model % n_heads == 0.")
        # Remaining TimesNet / Time-Series-Library configuration knobs,
        # forwarded verbatim through _make_config().
        self.factor = factor
        self.top_k = top_k
        self.num_kernels = num_kernels
        self.individual = individual
        self.moving_avg = moving_avg
        self.embed = embed
        self.freq = freq
        self.activation = activation
        self.output_attention = output_attention
        self.distil = distil
        self.mix = mix
        self.task_name = task_name
        self.features = features
        # The backbone emits input_dim channels (see _make_config's c_out);
        # a linear head projects to output_dim only when they differ.
        self.apply_output_projection = (self.input_dim != self.output_dim)
        # Loss configuration (see _compute_loss).
        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)
        self._rebuild_model_and_opt()

    def _make_config(self):
        # Attribute bag consumed by the TimesNet constructor.  Note c_out is
        # input_dim here: the final projection to output_dim (if any) is done
        # by self.out_head, not by the backbone.
        return {
            "seq_len": self.input_len,
            "label_len": self.input_len // 2,
            "pred_len": self.output_len,
            "enc_in": self.input_dim,
            "dec_in": self.input_dim,
            "c_out": self.input_dim,
            "individual": self.individual,
            "d_model": self.d_model,
            "d_ff": self.d_ff,
            "n_heads": self.n_heads,
            "e_layers": self.e_layers,
            "dropout": self.dropout,
            "factor": self.factor,
            "top_k": self.top_k,
            "num_kernels": self.num_kernels,
            "task_name": self.task_name,
            "features": self.features,
            "moving_avg": self.moving_avg,
            "embed": self.embed,
            "freq": self.freq,
            "activation": self.activation,
            "output_attention": self.output_attention,
            "distil": self.distil,
            "mix": self.mix,
            "device": self.device,
        }

    def _rebuild_model_and_opt(self):
        # (Re)create the network, optional output head and optimizer from the
        # current hyper-parameters (called from __init__ and set_params).
        cfg = self._make_config()
        self.model = TimesNetModel(configs=SimpleNamespace(**cfg)).to(self.device)
        self.out_head = (nn.Linear(self.input_dim, self.output_dim).to(self.device)
                         if self.apply_output_projection else nn.Identity())
        params = list(self.model.parameters()) + list(self.out_head.parameters())
        self.optimizer = torch.optim.Adam(params, lr=self.lr, weight_decay=self.l2_weight)
        self._mse = nn.MSELoss()

    def _compute_loss(self, y_hat, y_true):
        """Return the scalar training loss for a batch.

        "mse": plain MSE on raw outputs.  For "gauss_nll_var" and
        "student_t_nll_var", y_hat is read as a log-variance (clamped for
        numerical stability) and y_true as a log-variance or variance target
        depending on self.target_is_logvar.
        """
        if self.loss_type == "mse":
            return self._mse(y_hat, y_true)
        z = torch.clamp(y_hat, self.clamp_logvar_min, self.clamp_logvar_max)
        if self.loss_type == "gauss_nll_var":
            v = torch.exp(y_true) if self.target_is_logvar else y_true
            v = torch.clamp(v, min=self.nll_eps)
            # Gaussian NLL up to an additive constant: v/exp(z) + z.
            loss = v * torch.exp(-z) + z
            return loss.mean()
        elif self.loss_type == "student_t_nll_var":
            v = torch.exp(y_true) if self.target_is_logvar else y_true
            v = torch.clamp(v, min=self.nll_eps)
            nu = torch.tensor(self.student_df, device=z.device, dtype=z.dtype)
            # Student-t NLL up to constants depending only on nu.
            loss = 0.5 * (nu + 1.0) * torch.log1p(v / (nu * torch.exp(z))) + 0.5 * z
            return loss.mean()
        else:
            raise ValueError(f"Unknown loss_type: {self.loss_type}")

    def _save_checkpoint(self, epoch):
        # Persist model/head/optimizer state; no-op when no path is set.
        # head_state_dict is None when out_head is an Identity.
        if self.checkpoint_path:
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'head_state_dict': self.out_head.state_dict() if isinstance(self.out_head, nn.Linear) else None,
                'optimizer_state_dict': self.optimizer.state_dict()
            }, self.checkpoint_path)
            if self.verbose:
                print(f"Saved checkpoint at epoch {epoch} -> {self.checkpoint_path}")

    def _load_checkpoint(self):
        # Resume training state if a checkpoint file exists; returns the epoch
        # index to continue from (0 when starting fresh).
        if self.checkpoint_path and os.path.isfile(self.checkpoint_path):
            ckpt = torch.load(self.checkpoint_path, map_location=self.device)
            self.model.load_state_dict(ckpt['model_state_dict'])
            if ckpt.get('head_state_dict') is not None and isinstance(self.out_head, nn.Linear):
                self.out_head.load_state_dict(ckpt['head_state_dict'])
            self.optimizer.load_state_dict(ckpt['optimizer_state_dict'])
            if self.verbose:
                print(f"Resumed from checkpoint at epoch {ckpt['epoch']}")
            return ckpt['epoch'] + 1
        return 0

    def fit(self, X_train, y_train, X_val, y_val):
        """Mini-batch training with per-epoch validation and early stopping.

        Assumes X_* are array-likes shaped (N, input_len, input_dim) and y_*
        shaped (N, output_len, output_dim) -- TODO confirm with the calling
        pipeline.  NOTE(review): batches are taken in sequence order (no
        shuffling), and the best weights are checkpointed but NOT reloaded at
        the end -- the final in-memory weights are from the last epoch run.
        """
        self.model.train()
        start_epoch = self._load_checkpoint()
        best_val_loss = float('inf')
        epochs_no_improve = 0
        for epoch in range(start_epoch, self.epochs):
            epoch_loss = 0.0
            t0 = perf_counter()
            for i in range(0, len(X_train), self.batch_size):
                xb = torch.tensor(X_train[i:i+self.batch_size], dtype=torch.float32, device=self.device)
                yb = torch.tensor(y_train[i:i+self.batch_size], dtype=torch.float32, device=self.device)
                self.optimizer.zero_grad()
                # The forecast path expects a decoder input of pred_len steps;
                # an all-zeros placeholder is passed here.
                x_dec = torch.zeros((xb.size(0), self.output_len, self.input_dim),
                                    dtype=xb.dtype, device=self.device)
                pred_in = self.model(xb, None, x_dec, None)
                pred = self.out_head(pred_in)
                loss = self._compute_loss(pred, yb)
                loss.backward()
                self.optimizer.step()
                epoch_loss += loss.item()
            # Validation: whole validation set evaluated as a single batch.
            self.model.eval()
            with torch.no_grad():
                Xv = torch.tensor(X_val, dtype=torch.float32, device=self.device)
                yv = torch.tensor(y_val, dtype=torch.float32, device=self.device)
                x_dec_v = torch.zeros((Xv.size(0), self.output_len, self.input_dim),
                                      dtype=Xv.dtype, device=self.device)
                pred_in_v = self.model(Xv, None, x_dec_v, None)
                pred_v = self.out_head(pred_in_v)
                val_loss = self._compute_loss(pred_v, yv).item()
            if self.verbose:
                print(f"Epoch {epoch+1}/{self.epochs}  Train {epoch_loss:.4f}  Val {val_loss:.4f}  {perf_counter()-t0:.1f}s")
            # An epoch "improves" only when val loss drops by > min_delta.
            improved = (best_val_loss - val_loss) > self.min_delta
            if improved:
                best_val_loss = val_loss
                epochs_no_improve = 0
                self._save_checkpoint(epoch)
            else:
                epochs_no_improve += 1
            # Early stop only after min_epochs epochs have completed.
            if (epoch+1) >= self.min_epochs and epochs_no_improve >= self.patience:
                if self.verbose:
                    print("Early stopping.")
                break
            self.model.train()

    @torch.no_grad()
    def predict(self, X_test):
        """Forecast for X_test windows; returns a CPU numpy array."""
        self.model.eval()
        Xt = torch.tensor(X_test, dtype=torch.float32, device=self.device)
        x_dec = torch.zeros((Xt.size(0), self.output_len, self.input_dim),
                            dtype=Xt.dtype, device=self.device)
        pred_in = self.model(Xt, None, x_dec, None)
        pred = self.out_head(pred_in)
        return pred.cpu().numpy()

    def get_params(self, deep=True):
        # sklearn-style parameter export.  hidden_dim/hidden_layers are
        # reported as None because they were already folded into
        # d_model/d_ff/n_heads/e_layers at construction time.
        return {
            "input_len": self.input_len, "output_len": self.output_len,
            "input_dim": self.input_dim, "output_dim": self.output_dim, "no_tasks": self.no_tasks,
            "device": self.device, "epochs": self.epochs, "batch_size": self.batch_size,
            "verbose": self.verbose, "lr": self.lr, "checkpoint_path": self.checkpoint_path,
            "l2_weight": self.l2_weight, "patience": self.patience, "min_epochs": self.min_epochs, "min_delta": self.min_delta,
            "d_model": self.d_model, "d_ff": self.d_ff, "n_heads": self.n_heads, "e_layers": self.e_layers,
            "dropout": self.dropout, "factor": self.factor, "top_k": self.top_k, "num_kernels": self.num_kernels,
            "individual": self.individual, "moving_avg": self.moving_avg, "embed": self.embed, "freq": self.freq,
            "activation": self.activation, "output_attention": self.output_attention, "distil": self.distil, "mix": self.mix,
            "task_name": self.task_name, "features": self.features,
            "hidden_dim": None, "hidden_layers": None,
            "loss_type": self.loss_type, "target_is_logvar": self.target_is_logvar,
            "nll_eps": self.nll_eps, "clamp_logvar_min": self.clamp_logvar_min,
            "clamp_logvar_max": self.clamp_logvar_max, "student_df": self.student_df,
        }

    def set_params(self, **params):
        """sklearn-style parameter update; rebuilds model and optimizer."""
        # All params are applied blindly as attributes first (this includes
        # hidden_dim/hidden_layers, which also get the mapping below).
        for k, v in params.items():
            setattr(self, k, v)
        # Re-apply the hidden_dim/hidden_layers -> d_model/d_ff/n_heads/
        # e_layers mapping for any specific knob not set explicitly.
        if "hidden_dim" in params and params.get("hidden_dim") is not None:
            if "d_model" not in params: self.d_model = params["hidden_dim"]
            if "d_ff" not in params:    self.d_ff = max(4 * self.d_model, params["hidden_dim"])
        if "hidden_layers" in params and params.get("hidden_layers") is not None:
            if "n_heads" not in params: self.n_heads = max(1, params["hidden_layers"])
            if "e_layers" not in params: self.e_layers = max(1, params["hidden_layers"])
        # Keep d_model divisible by n_heads (same rule as __init__).
        if self.d_model % self.n_heads != 0:
            for h in range(min(self.n_heads, self.d_model), 0, -1):
                if self.d_model % h == 0:
                    self.n_heads = h
                    break
            if self.verbose:
                print(f"[TimesNetWrapper] Adjusted n_heads to {self.n_heads} so d_model % n_heads == 0.")
        self.apply_output_projection = (self.input_dim != self.output_dim)
        self._rebuild_model_and_opt()
        return self

All tasks EURUSD with TimesNet without CV¶

In [715]:
import os
# Where the fitted no-CV TimesNet model for EURUSD (data set 8) is pickled.
save_eur_test_TimesNet_model_file_path_9 = os.path.join(root_folder, objects_relative_path, "eur_test_TimesNet_model_9.pkl")


# Single train/val/test fit (no cross-validation, no parameter grid) of
# TimesNet on the EURUSD data; the 5th return value (y data) is discarded.
t_TN9_results_eur, t_TN9_nested_results_eur, t_TN9_best_model_eur, t_TN9_best_params_eur, _ = train_and_evaluate_model(
    model_type="TimesNet",
    X_price=eur8_X_price,
    X_time=eur8_X_time,
    y=eur8_y,
    no_tasks=1,
    use_nested_cv=False,
    merge_price_time=False,
    normalize_X=True,
    normalize_Time=False,  # time features are kept on their native [-1, 1] scale
    normalize_y=True,
    flatten=False,
    save_model_path=save_eur_test_TimesNet_model_file_path_9,
    lr=5e-3,
    epochs=50,
    batch_size=512,
    verbose=True,
    time_horizon=20,
    # NOTE(review): d_ff < d_model here (256 vs 64) inverts the usual
    # transformer sizing -- confirm this is intentional.
    d_model=256,
    d_ff=64,
    n_heads=4,
    e_layers=4,
    target_mode="log_mse"

)
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[TimesNetWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755
Epoch 1/50  Train 116.1386  Val 3.6022  24.5s
Epoch 2/50  Train 17.5851  Val 1.3612  23.4s
Epoch 3/50  Train 6.9005  Val 0.9997  23.3s
Epoch 4/50  Train 5.4968  Val 0.9572  23.5s
Epoch 5/50  Train 5.7792  Val 0.9673  23.9s
Epoch 6/50  Train 5.4373  Val 1.0994  26.1s
Epoch 7/50  Train 5.6509  Val 0.9637  28.6s
Epoch 8/50  Train 5.4379  Val 1.0060  30.2s
Epoch 9/50  Train 5.5104  Val 0.9600  28.2s
Epoch 10/50  Train 5.5407  Val 1.0396  27.7s
Epoch 11/50  Train 5.8708  Val 0.9908  27.6s
Epoch 12/50  Train 5.8469  Val 0.9676  27.7s
Epoch 13/50  Train 6.1779  Val 0.9668  28.4s
Epoch 14/50  Train 5.4213  Val 1.0066  27.9s
Epoch 15/50  Train 5.4624  Val 0.9785  28.5s
Epoch 16/50  Train 5.4305  Val 0.9739  30.7s
Epoch 17/50  Train 5.4321  Val 0.9870  28.9s
Epoch 18/50  Train 5.4423  Val 0.9947  29.2s
Epoch 19/50  Train 5.4490  Val 0.9788  29.2s
Epoch 20/50  Train 5.4371  Val 0.9779  29.6s
Epoch 21/50  Train 5.4350  Val 0.9828  28.9s
Epoch 22/50  Train 5.4374  Val 0.9778  29.1s
Epoch 23/50  Train 5.4611  Val 0.9614  28.7s
Epoch 24/50  Train 5.4581  Val 1.0960  29.9s
Epoch 25/50  Train 5.7326  Val 0.9690  30.8s
Epoch 26/50  Train 5.6026  Val 1.1078  29.9s
Epoch 27/50  Train 5.9645  Val 1.0725  30.2s
Epoch 28/50  Train 5.8813  Val 0.9926  33.6s
Epoch 29/50  Train 6.0835  Val 1.7720  30.3s
Epoch 30/50  Train 8.8096  Val 2.1499  31.7s
Epoch 31/50  Train 10.4402  Val 1.5574  31.7s
Epoch 32/50  Train 12.7504  Val 1.5455  31.7s
Epoch 33/50  Train 13.7888  Val 4.0920  34.8s
Epoch 34/50  Train 19.8239  Val 6.0151  31.5s
Epoch 35/50  Train 29.8146  Val 4.0086  33.9s
Epoch 36/50  Train 23.7000  Val 1.7130  33.6s
Epoch 37/50  Train 10.6270  Val 2.6456  36.4s
Epoch 38/50  Train 12.6774  Val 1.3597  34.6s
Epoch 39/50  Train 7.6017  Val 1.6400  33.7s
Epoch 40/50  Train 8.3244  Val 1.2084  33.3s
Epoch 41/50  Train 8.2133  Val 1.2465  32.1s
Epoch 42/50  Train 6.9128  Val 0.9602  34.1s
Epoch 43/50  Train 5.6617  Val 0.9564  37.6s
Epoch 44/50  Train 5.4336  Val 1.0557  34.1s
Epoch 45/50  Train 5.5941  Val 0.9582  31.9s
Epoch 46/50  Train 5.4980  Val 0.9761  37.5s
Epoch 47/50  Train 5.4151  Val 0.9941  36.3s
Epoch 48/50  Train 5.4274  Val 0.9645  33.2s
Epoch 49/50  Train 5.4228  Val 0.9822  35.6s
Epoch 50/50  Train 5.4173  Val 0.9785  34.9s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 1
output_dim: 1
no_tasks: 1
device: mps
epochs: 50
batch_size: 512
verbose: True
lr: 0.00500000
checkpoint_path: None
l2_weight: 0.00000000
patience: 10
min_epochs: 50
min_delta: 0.00010000
d_model: 256
d_ff: 64
n_heads: 4
e_layers: 4
dropout: 0.00000000
factor: 2
top_k: 3
num_kernels: 3
individual: False
moving_avg: 25
embed: timeF
freq: d
activation: gelu
output_attention: False
distil: True
mix: True
task_name: long_term_forecast
features: MS
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11953831
1 day(s) RMSE                      : 0.21673289
1 day(s) R2                        : -0.02913410
1 day(s) Pearson r                 : 0.34434761
1 day(s) QLIKE                     : 0.50089609
3 day(s) MAE                       : 0.12021996
3 day(s) RMSE                      : 0.21784774
3 day(s) R2                        : -0.04777035
3 day(s) Pearson r                 : 0.32170582
3 day(s) QLIKE                     : 0.50529856
5 day(s) MAE                       : 0.12008998
5 day(s) RMSE                      : 0.21790367
5 day(s) R2                        : -0.06022091
5 day(s) Pearson r                 : 0.30049259
5 day(s) QLIKE                     : 0.50839794
10 day(s) MAE                      : 0.12002516
10 day(s) RMSE                     : 0.21831475
10 day(s) R2                       : -0.07438354
10 day(s) Pearson r                : 0.27805659
10 day(s) QLIKE                    : 0.51306488
20 day(s) MAE                      : 0.12000741
20 day(s) RMSE                     : 0.21868201
20 day(s) R2                       : -0.09071540
20 day(s) Pearson r                : 0.25643350
20 day(s) QLIKE                    : 0.51539713
full horizon MAE                   : 0.12000741
full horizon RMSE                  : 0.21868201
full horizon R2                    : -0.09071540
full horizon Pearson r             : 0.25643350
full horizon QLIKE                 : 0.51539713

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/eur_test_TimesNet_model_9.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0606662, max=0.544609

OHO with parameter search - TimesNet - all tasks - data 3 - horizon 1¶

In [ ]:
import os

# Destination for the best TimesNet model found by the single-holdout search.
save_test_TimesNet_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_TimesNet_model.pkl"
)

# Grid entries overwrite any overlapping keyword passed directly below.
param_grid = {
    "lr": [5e-3],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    #"batch_size": [256, 512],

    "d_model": [64],
    "d_ff": [256],
    "n_heads": [4],
    "e_layers": [2],
}

# Single-holdout parameter search + evaluation for TimesNet on eur4 data.
t1d3_OHO_TimesNet_results, t1d3_OHO_TimesNet_nested_results, t1d3_OHO_TimesNet_best_model, t1d3_OHO_TimesNet_best_params, t1d3_OHO_TimesNet_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="TimesNet",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,

    # specify these for each data
    no_tasks=eur4_y.shape[2],
    flatten=False,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    epochs = 1,
    min_epochs=50,

    # saving model
    # BUG FIX: this previously passed save_test_OHO_LSTM_KAN_model_file_path,
    # silently overwriting the LSTM-KAN artifact and leaving the TimesNet
    # path defined above unused; save to the TimesNet path instead.
    save_model_path=save_test_TimesNet_model_file_path,

    # these go into the parameter grid
    lr=1e-3,
    dropout=0,
    l2_weight=1e-4,
    batch_size=4,

    # specific to TimeNet
    d_model=64,
    d_ff=256,
    n_heads=4,
    e_layers=2
)
model parameter(s) from the grid will overwrite any overlapping parameters provided directly to this function
[TimesNetWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 28
Features for y: 2

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
[TimesNetWrapper] Using device: mps
Epoch 1/1  Train 8.9520  Val 0.0155  25.7s

Best parameters found in single holdout:
  d_ff: 254
  d_model: 64
  e_layers: 2
  lr: 0.005
  n_heads: 4

Results for the best model from single hold out evaluation:

--- Task 1 ---
1 day(s) MAE (log-var)             : 0.81650851
1 day(s) RMSE (log-var)            : 1.27389565
1 day(s) R2 (log-var)              : 0.07279190
1 day(s) MAE (var)                 : 0.10757523
1 day(s) RMSE (var)                : 0.20148224
1 day(s) R2 (var)                  : 0.11059530
1 day(s) QLIKE (var)               : 0.48870430
5 day(s) MAE (log-var)             : 0.82052811
5 day(s) RMSE (log-var)            : 1.28208969
5 day(s) R2 (log-var)              : 0.05698200
5 day(s) MAE (var)                 : 0.10804338
5 day(s) RMSE (var)                : 0.20161883
5 day(s) R2 (var)                  : 0.09231951
5 day(s) QLIKE (var)               : 0.49507399
10 day(s) MAE (log-var)            : 0.82326124
10 day(s) RMSE (log-var)           : 1.28656853
10 day(s) R2 (log-var)             : 0.04793778
10 day(s) MAE (var)                : 0.10852924
10 day(s) RMSE (var)               : 0.20207523
10 day(s) R2 (var)                 : 0.07950176
10 day(s) QLIKE (var)              : 0.49945789
20 day(s) MAE (log-var)            : 0.82599889
20 day(s) RMSE (log-var)           : 1.29168608
20 day(s) R2 (log-var)             : 0.03331686
20 day(s) MAE (var)                : 0.10954185
20 day(s) RMSE (var)               : 0.20214579
20 day(s) R2 (var)                 : 0.06799474
20 day(s) QLIKE (var)              : 0.50297977
full horizon MAE (log-var)         : 0.82944822
full horizon RMSE (log-var)        : 1.29706636
full horizon R2 (log-var)          : 0.02187336
full horizon MAE (var)             : 0.11047549
full horizon RMSE (var)            : 0.20230433
full horizon R2 (var)              : 0.06147194
full horizon QLIKE (var)           : 0.50562124

--- Task 2 ---
1 day(s) MAE (log-var)             : 0.30469243
1 day(s) RMSE (log-var)            : 0.42313402
1 day(s) R2 (log-var)              : -0.01673905
1 day(s) MAE (var)                 : 0.34857166
1 day(s) RMSE (var)                : 0.74975049
1 day(s) R2 (var)                  : -0.01992211
1 day(s) QLIKE (var)               : 0.09972194
5 day(s) MAE (log-var)             : 0.30282973
5 day(s) RMSE (log-var)            : 0.42099436
5 day(s) R2 (log-var)              : -0.01611864
5 day(s) MAE (var)                 : 0.34547222
5 day(s) RMSE (var)                : 0.74698410
5 day(s) R2 (var)                  : -0.02043361
5 day(s) QLIKE (var)               : 0.09904869
10 day(s) MAE (log-var)            : 0.30265280
10 day(s) RMSE (log-var)           : 0.42110758
10 day(s) R2 (log-var)             : -0.01730427
10 day(s) MAE (var)                : 0.34485346
10 day(s) RMSE (var)               : 0.74660340
10 day(s) R2 (var)                 : -0.02103765
10 day(s) QLIKE (var)              : 0.09913759
20 day(s) MAE (log-var)            : 0.30305197
20 day(s) RMSE (log-var)           : 0.42209122
20 day(s) R2 (log-var)             : -0.01794885
20 day(s) MAE (var)                : 0.34485706
20 day(s) RMSE (var)               : 0.74667121
20 day(s) R2 (var)                 : -0.02117940
20 day(s) QLIKE (var)              : 0.09938547
full horizon MAE (log-var)         : 0.30374141
full horizon RMSE (log-var)        : 0.42343072
full horizon R2 (log-var)          : -0.01862380
full horizon MAE (var)             : 0.34569119
full horizon RMSE (var)            : 0.74810879
full horizon R2 (var)              : -0.02189069
full horizon QLIKE (var)           : 0.09994036

Best single-holdout model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_OHO_LSTM_KAN_model.pkl

ITransformer¶

ITransformer Wrapper¶

In [132]:
import os
from types import SimpleNamespace
from time import perf_counter
import torch
import torch.nn as nn
from models.iTransformer import Model as ITransformerModel

class ITransformerWrapper:
    def __init__(
        self,
        input_len, output_len, input_dim, output_dim, no_tasks,
        device=None,
        epochs=10, batch_size=32, verbose=False, lr=1e-3, checkpoint_path=None,
        dropout=0.0, l2_weight=1e-5, patience=10, min_epochs=50, min_delta=1e-4,
        hidden_dim=None,
        hidden_layers=None,
        d_model=None,
        d_ff=None,
        n_heads=None,
        e_layers=None,
        attn="prob", activation="gelu", output_attention=False, distil=True,
        embed="timeF", freq="d", features="MS", moving_avg=25, factor=3,
        task_name="long_term_forecast",
        loss_type="mse",
        target_is_logvar=True,
        nll_eps=1e-12,
        clamp_logvar_min=-20.0,
        clamp_logvar_max=20.0,
        student_df=5.0,
    ):
        """Store hyper-parameters, resolve architecture defaults, build model.

        Mirrors TimesNetWrapper: ``hidden_dim``/``hidden_layers`` are generic
        knobs mapped onto d_model/d_ff and n_heads/e_layers when those are
        not given explicitly.  ``loss_type`` is "mse" (default here),
        "gauss_nll_var" or "student_t_nll_var"; the NLL variants treat the
        network output as a log-variance.
        """
        # Window/feature geometry.
        self.input_len  = input_len
        self.output_len = output_len
        self.input_dim  = input_dim
        self.output_dim = output_dim
        self.no_tasks   = no_tasks
        # Device preference when none is given: Apple MPS, then CUDA, then CPU.
        self.device = device or ("mps" if torch.backends.mps.is_available()
                                 else ("cuda" if torch.cuda.is_available() else "cpu"))
        print(f"[ITransformerWrapper] Using device: {self.device}")
        # Optimisation / early-stopping settings.
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = verbose
        self.lr = lr
        self.checkpoint_path = checkpoint_path
        self.dropout = dropout
        self.l2_weight = l2_weight
        self.patience = patience
        self.min_epochs = min_epochs
        self.min_delta = min_delta
        # Fallback architecture sizes used when nothing else is supplied.
        default_d_model  = 64
        default_d_ff     = 256
        default_n_heads  = 4
        default_e_layers = 2
        # Fold the generic hidden_dim/hidden_layers knobs into the
        # transformer-specific sizes unless those were set explicitly.
        if d_model is None and hidden_dim is not None:
            d_model = hidden_dim
        if d_ff is None and hidden_dim is not None:
            d_ff = max(4 * (d_model if d_model is not None else default_d_model), hidden_dim)
        if n_heads is None and hidden_layers is not None:
            n_heads = max(1, hidden_layers)
        if e_layers is None and hidden_layers is not None:
            e_layers = max(1, hidden_layers)
        self.d_model  = d_model  if d_model  is not None else default_d_model
        self.d_ff     = d_ff     if d_ff     is not None else default_d_ff
        self.n_heads  = n_heads  if n_heads  is not None else default_n_heads
        self.e_layers = e_layers if e_layers is not None else default_e_layers
        # Multi-head attention needs d_model % n_heads == 0; shrink n_heads
        # to the largest divisor of d_model when the constraint is violated.
        if self.d_model % self.n_heads != 0:
            for h in range(min(self.n_heads, self.d_model), 0, -1):
                if self.d_model % h == 0:
                    self.n_heads = h
                    break
            if self.verbose:
                print(f"[ITransformerWrapper] Adjusted n_heads to {self.n_heads} so d_model % n_heads == 0.")
        # Remaining iTransformer configuration knobs, forwarded through
        # _make_config().
        self.attn = attn
        self.activation = activation
        self.output_attention = output_attention
        self.distil = distil
        self.embed = embed
        self.freq = freq
        self.features = features
        self.moving_avg = moving_avg
        self.factor = factor
        self.task_name = task_name
        # Loss configuration (see _compute_loss).
        self.loss_type = str(loss_type)
        self.target_is_logvar = bool(target_is_logvar)
        self.nll_eps = float(nll_eps)
        self.clamp_logvar_min = float(clamp_logvar_min)
        self.clamp_logvar_max = float(clamp_logvar_max)
        self.student_df = float(student_df)
        self._rebuild()

    def _make_config(self):
        return {
            "task_name": self.task_name,
            "seq_len": self.input_len,
            "label_len": self.input_len // 2,
            "pred_len": self.output_len,
            "enc_in": self.input_dim,
            "dec_in": self.input_dim,
            "c_out": self.output_dim,
            "e_layers": self.e_layers,
            "d_model": self.d_model,
            "n_heads": self.n_heads,
            "d_ff": self.d_ff,
            "dropout": self.dropout,
            "attn": self.attn,
            "activation": self.activation,
            "output_attention": self.output_attention,
            "distil": self.distil,
            "embed": self.embed,
            "freq": self.freq,
            "device": self.device,
            "num_workers": 0,
            "features": self.features,
            "moving_avg": self.moving_avg,
            "factor": self.factor,
        }

    def _rebuild(self):
        # (Re)create the network and optimizer from the current
        # hyper-parameters (called at the end of __init__).
        cfg = self._make_config()
        self.model = ITransformerModel(configs=SimpleNamespace(**cfg)).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.l2_weight)
        self._mse = nn.MSELoss()

    def _compute_loss(self, y_hat, y_true):
        if self.loss_type == "mse":
            return self._mse(y_hat, y_true)
        z = torch.clamp(y_hat, self.clamp_logvar_min, self.clamp_logvar_max)
        if self.loss_type == "gauss_nll_var":
            v = torch.exp(y_true) if self.target_is_logvar else y_true
            v = torch.clamp(v, min=self.nll_eps)
            loss = v * torch.exp(-z) + z
            return loss.mean()
        elif self.loss_type == "student_t_nll_var":
            v = torch.exp(y_true) if self.target_is_logvar else y_true
            v = torch.clamp(v, min=self.nll_eps)
            nu = torch.tensor(self.student_df, device=z.device, dtype=z.dtype)
            loss = 0.5 * (nu + 1.0) * torch.log1p(v / (nu * torch.exp(z))) + 0.5 * z
            return loss.mean()
        else:
            raise ValueError(f"Unknown loss_type: {self.loss_type}")

    def _save_checkpoint(self, epoch):
        if self.checkpoint_path:
            torch.save({
                "epoch": epoch,
                "model_state_dict": self.model.state_dict(),
                "optimizer_state_dict": self.optimizer.state_dict(),
            }, self.checkpoint_path)
            if self.verbose:
                print(f"Saved checkpoint at epoch {epoch} to: {self.checkpoint_path}")

    def _load_checkpoint(self):
        """Restore model/optimizer state from `self.checkpoint_path` if present.

        Returns the epoch index to resume training from: the saved epoch + 1,
        or 0 when no usable checkpoint file exists.
        """
        path = self.checkpoint_path
        if not (path and os.path.isfile(path)):
            return 0
        ckpt = torch.load(path, map_location=self.device)
        self.model.load_state_dict(ckpt["model_state_dict"])
        self.optimizer.load_state_dict(ckpt["optimizer_state_dict"])
        if self.verbose:
            print(f"Resumed from checkpoint at epoch {ckpt['epoch']}")
        return ckpt["epoch"] + 1

    def fit(self, X_train, y_train, X_val, y_val):
        """Train with mini-batch Adam and early stopping on validation loss.

        Args:
            X_train, y_train: training inputs/targets, array-like convertible
                via ``torch.tensor`` (batch-first; sliced sequentially).
            X_val, y_val: held-out inputs/targets evaluated once per epoch
                in a single forward pass.

        Resumes from ``self.checkpoint_path`` when a checkpoint exists, and
        checkpoints whenever validation loss improves by more than
        ``self.min_delta``. Early stopping fires only after ``self.min_epochs``
        epochs AND ``self.patience`` consecutive non-improving epochs.
        """
        self.model.train()
        # 0 when no checkpoint exists; otherwise the epoch after the saved one.
        start_epoch = self._load_checkpoint()
        best_val = float("inf")
        bad_epochs = 0
        for epoch in range(start_epoch, self.epochs):
            epoch_loss = 0.0
            t0 = perf_counter()
            # NOTE(review): batches are taken in fixed order — no shuffling
            # between epochs; confirm this is intentional for time-series data.
            for i in range(0, len(X_train), self.batch_size):
                xb = torch.tensor(X_train[i:i+self.batch_size], dtype=torch.float32, device=self.device)
                yb = torch.tensor(y_train[i:i+self.batch_size], dtype=torch.float32, device=self.device)
                self.optimizer.zero_grad()
                # Zero decoder input; the model is called with no time marks (None).
                x_dec = torch.zeros((xb.size(0), self.output_len, self.output_dim), dtype=xb.dtype, device=self.device)
                pred = self.model(xb, None, x_dec, None)
                loss = self._compute_loss(pred, yb)
                loss.backward()
                self.optimizer.step()
                epoch_loss += loss.item()  # sum of per-batch losses (not averaged)
            # Validate on the whole held-out set in one pass.
            self.model.eval()
            with torch.no_grad():
                Xv = torch.tensor(X_val, dtype=torch.float32, device=self.device)
                yv = torch.tensor(y_val, dtype=torch.float32, device=self.device)
                x_dec_v = torch.zeros((Xv.size(0), self.output_len, self.output_dim), dtype=Xv.dtype, device=self.device)
                pv = self.model(Xv, None, x_dec_v, None)
                val = self._compute_loss(pv, yv).item()
            if self.verbose:
                print(f"Epoch {epoch+1}/{self.epochs}  Train {epoch_loss:.4f}  Val {val:.4f}  {perf_counter()-t0:.1f}s")
            if (best_val - val) > self.min_delta:
                best_val = val
                bad_epochs = 0
                self._save_checkpoint(epoch)  # checkpoint only on improvement
            else:
                bad_epochs += 1
            if (epoch + 1) >= self.min_epochs and bad_epochs >= self.patience:
                if self.verbose:
                    print("Early stopping triggered.")
                # NOTE(review): on early stop the model keeps last-epoch weights
                # (not the best checkpoint) and stays in eval mode — confirm intended.
                break
            self.model.train()  # back to train mode for the next epoch

    @torch.no_grad()
    def predict(self, X_test):
        """Run inference on `X_test` (array-like, batch-first) and return the
        model output as a NumPy array. Gradients are disabled and the model is
        switched to eval mode."""
        self.model.eval()
        encoder_in = torch.tensor(X_test, dtype=torch.float32, device=self.device)
        # Zero-filled decoder input matching the configured output window.
        decoder_in = torch.zeros(
            (encoder_in.size(0), self.output_len, self.output_dim),
            dtype=encoder_in.dtype,
            device=self.device,
        )
        return self.model(encoder_in, None, decoder_in, None).cpu().numpy()

    def get_params(self, deep=True):
        """Return the wrapper's hyperparameters as a dict (sklearn-style).

        `deep` is accepted for API compatibility and ignored. `hidden_dim` and
        `hidden_layers` are reported as None: they are write-only aliases that
        `set_params` maps onto the iTransformer-specific keys.
        """
        params = {key: getattr(self, key) for key in (
            "input_len", "output_len",
            "input_dim", "output_dim", "no_tasks",
            "device", "epochs", "batch_size",
            "verbose", "lr", "checkpoint_path",
            "dropout", "l2_weight", "patience",
            "min_epochs", "min_delta",
            "d_model", "d_ff", "n_heads", "e_layers",
            "attn", "activation", "output_attention",
            "distil", "embed", "freq", "features",
            "moving_avg", "factor", "task_name",
        )}
        params["hidden_dim"] = None
        params["hidden_layers"] = None
        for key in ("loss_type", "target_is_logvar", "nll_eps",
                    "clamp_logvar_min", "clamp_logvar_max", "student_df"):
            params[key] = getattr(self, key)
        return params

    def set_params(self, **params):
        """sklearn-style setter: update hyperparameters and rebuild when needed.

        Generic grid-search aliases are mapped onto iTransformer keys unless
        the specific keys were also supplied:
          - ``hidden_dim``    -> ``d_model`` and ``d_ff`` (= max(4*d_model, hidden_dim))
          - ``hidden_layers`` -> ``n_heads`` and ``e_layers`` (each >= 1)

        If the resulting ``d_model`` is not divisible by ``n_heads``, the head
        count is lowered to the largest divisor not exceeding the request.
        Returns ``self``.
        """
        for k, v in params.items():
            setattr(self, k, v)
        if "hidden_dim" in params and params.get("hidden_dim") is not None:
            if "d_model" not in params: self.d_model = params["hidden_dim"]
            if "d_ff" not in params:    self.d_ff = max(4 * self.d_model, params["hidden_dim"])
        if "hidden_layers" in params and params.get("hidden_layers") is not None:
            if "n_heads" not in params: self.n_heads = max(1, params["hidden_layers"])
            if "e_layers" not in params: self.e_layers = max(1, params["hidden_layers"])
        # Multi-head attention requires d_model % n_heads == 0.
        if self.d_model % self.n_heads != 0:
            for h in range(min(self.n_heads, self.d_model), 0, -1):
                if self.d_model % h == 0:
                    self.n_heads = h
                    break
            if self.verbose:
                print(f"[ITransformerWrapper] Adjusted n_heads to {self.n_heads} so d_model % n_heads == 0.")
        rebuild_keys = {"input_len","output_len","input_dim","output_dim","dropout","d_model","d_ff","n_heads","e_layers","attn","activation","output_attention","distil","embed","freq","features","moving_avg","factor","task_name","device"}
        needs_rebuild = any(k in rebuild_keys for k in params)
        # Bug fix: the hidden_dim / hidden_layers aliases mutate architecture
        # fields above, so they must also trigger a rebuild. Previously only
        # the explicit keys did, and the live model silently kept its old
        # architecture during alias-based grid searches.
        needs_rebuild = needs_rebuild or params.get("hidden_dim") is not None or params.get("hidden_layers") is not None
        if needs_rebuild:
            self._rebuild()
        return self

Test all tasks for EURUSD with ITransformer, without CV¶

In [803]:
import os
# Destination for the pickled single-fit model.
save_eur_test_ITRS_model_file_path_8 = os.path.join(root_folder, objects_relative_path, "eur_test_ITRS_model_8.pkl")


# Sanity check: feature counts of the input window and target arrays
# (axis 2 of the (samples, window, features) tensors).
print(eur8_X_price.shape[2])
print(eur8_y.shape[2])

# Single train/val/test fit (no CV, no grid search) of the iTransformer
# on the data-8 EURUSD arrays, predicting the log-MSE target.
t_ITRS8_results_eur, t_ITRS8_nested_results_eur, t_ITRS8_best_model_eur, t_ITRS8_best_params_eur, _ = train_and_evaluate_model(
    model_type="ITransformer",
    X_price=eur8_X_price,
    X_time=eur8_X_time,
    y=eur8_y,
    no_tasks=1,
    use_nested_cv=False,
    merge_price_time=False,   # price and time features kept as separate inputs
    normalize_X=True,
    normalize_Time=False,
    normalize_y=True,
    flatten=False,            # keep the (samples, window, features) shape
    save_model_path=save_eur_test_ITRS_model_file_path_8,
    lr=1e-4,
    epochs=50,
    batch_size=128,
    verbose=True,
    time_horizon=1,
    
    # iTransformer architecture.
    d_model=64,
    d_ff=256,
    n_heads=4,
    e_layers=2,
    
    target_mode="log_mse"

)
1
6
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9963965101732293
  Min value:  -3.718602223017603
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_val (log_mse scaled):
Shape: (302, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642504
  Min value:  -2.975928211620611
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.0
  Min value:  -1.0
Checking y_test (log_mse scaled):
Shape: (757, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316056
  Min value:  -5.349969967608034
Epoch 1/50  Train 20.0067  Val 0.6610  0.5s
Epoch 2/50  Train 15.8688  Val 0.5731  0.2s
Epoch 3/50  Train 14.2884  Val 0.5238  0.2s
Epoch 4/50  Train 13.3456  Val 0.4948  0.1s
Epoch 5/50  Train 12.6880  Val 0.4775  0.1s
Epoch 6/50  Train 12.1958  Val 0.4664  0.1s
Epoch 7/50  Train 11.8065  Val 0.4593  0.2s
Epoch 8/50  Train 11.4853  Val 0.4548  0.1s
Epoch 9/50  Train 11.2120  Val 0.4522  0.2s
Epoch 10/50  Train 10.9735  Val 0.4510  0.2s
Epoch 11/50  Train 10.7610  Val 0.4509  0.1s
Epoch 12/50  Train 10.5687  Val 0.4517  0.1s
Epoch 13/50  Train 10.3928  Val 0.4533  0.1s
Epoch 14/50  Train 10.2306  Val 0.4554  0.2s
Epoch 15/50  Train 10.0803  Val 0.4580  0.1s
Epoch 16/50  Train 9.9404  Val 0.4611  0.1s
Epoch 17/50  Train 9.8099  Val 0.4644  0.1s
Epoch 18/50  Train 9.6878  Val 0.4680  0.1s
Epoch 19/50  Train 9.5734  Val 0.4719  0.2s
Epoch 20/50  Train 9.4661  Val 0.4758  0.1s
Epoch 21/50  Train 9.3651  Val 0.4799  0.2s
Epoch 22/50  Train 9.2700  Val 0.4840  0.2s
Epoch 23/50  Train 9.1801  Val 0.4882  0.2s
Epoch 24/50  Train 9.0952  Val 0.4924  0.1s
Epoch 25/50  Train 9.0147  Val 0.4967  0.1s
Epoch 26/50  Train 8.9381  Val 0.5009  0.2s
Epoch 27/50  Train 8.8653  Val 0.5051  0.2s
Epoch 28/50  Train 8.7957  Val 0.5093  0.1s
Epoch 29/50  Train 8.7292  Val 0.5134  0.2s
Epoch 30/50  Train 8.6654  Val 0.5175  0.1s
Epoch 31/50  Train 8.6040  Val 0.5216  0.1s
Epoch 32/50  Train 8.5449  Val 0.5256  0.1s
Epoch 33/50  Train 8.4878  Val 0.5296  0.1s
Epoch 34/50  Train 8.4326  Val 0.5336  0.1s
Epoch 35/50  Train 8.3791  Val 0.5374  0.1s
Epoch 36/50  Train 8.3271  Val 0.5413  0.1s
Epoch 37/50  Train 8.2766  Val 0.5451  0.1s
Epoch 38/50  Train 8.2273  Val 0.5488  0.1s
Epoch 39/50  Train 8.1792  Val 0.5525  0.1s
Epoch 40/50  Train 8.1322  Val 0.5562  0.1s
Epoch 41/50  Train 8.0862  Val 0.5598  0.1s
Epoch 42/50  Train 8.0412  Val 0.5634  0.1s
Epoch 43/50  Train 7.9970  Val 0.5669  0.1s
Epoch 44/50  Train 7.9536  Val 0.5705  0.1s
Epoch 45/50  Train 7.9110  Val 0.5739  0.1s
Epoch 46/50  Train 7.8691  Val 0.5774  0.1s
Epoch 47/50  Train 7.8278  Val 0.5808  0.1s
Epoch 48/50  Train 7.7872  Val 0.5843  0.1s
Epoch 49/50  Train 7.7471  Val 0.5877  0.1s
Epoch 50/50  Train 7.7076  Val 0.5911  0.1s
Early stopping triggered.

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 1
output_dim: 1
no_tasks: 1
device: mps
epochs: 50
batch_size: 128
verbose: True
lr: 0.00010000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00000000
patience: 10
min_epochs: 50
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 2
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11062735
1 day(s) RMSE                      : 0.24461457
1 day(s) R2                        : -0.31095260
1 day(s) Pearson r                 : 0.54062598
1 day(s) QLIKE                     : 0.43211135
full horizon MAE                   : 0.11062735
full horizon RMSE                  : 0.24461457
full horizon R2                    : -0.31095260
full horizon Pearson r             : 0.54062598
full horizon QLIKE                 : 0.43211135

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/eur_test_ITRS_model_8.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00157381, max=2.72273

OHO with parameter search - iTransformer - all tasks - data 3 - horizon 1¶

In [ ]:
import os

# Destination for the pickled best single-holdout model.
save_test_ITransformer_model_file_path = os.path.join(
    root_folder,
    objects_relative_path,
    "test_ITransformer_model.pkl"
)

# Hyperparameter grid for the single-holdout search; a single value per key
# here, so effectively one configuration. The commented-out entries were
# explored earlier and left for reference.
param_grid = {
    "lr": [5e-3],
    #"dropout": [0, 0.1],
    #"l2_weight": [5e-5, 5e-4],
    #"batch_size": [256, 512],
    "d_model": [64],
    "d_ff": [256],
    "n_heads": [4],
    "e_layers": [2],
}

# Single-holdout (one train/test split) parameter search on all tasks of the
# data-9 EURUSD arrays; keyword arguments below the grid are defaults for
# values the grid does not override.
t1d3_OHO_ITransformer_results, t1d3_OHO_ITransformer_nested_results, t1d3_OHO_ITransformer_best_model, t1d3_OHO_ITransformer_best_params, t1d3_OHO_ITransformer_y_data  = train_and_evaluate_model(

    param_grid=param_grid,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="ITransformer",

    # data for each scenario
    X_price=eur9_X_price,
    X_time=eur9_X_time,
    y=eur9_y,

    # specify these for each data
    no_tasks=eur9_y.shape[2],
    flatten=False,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    epochs = 50,
    min_epochs=50,

    # saving model
    save_model_path=save_test_ITransformer_model_file_path,

    # these go into the parameter grid
    lr=1e-3,
    dropout=0,
    l2_weight=1e-4,
    batch_size=4,
    
    hidden_layers=2,
    hidden_dim=32,

    # specific to ITransformer:
    d_model=64,
    d_ff=256,
    n_heads=4,
    e_layers=2
)
[mode=log_var_ratio] loss_type=mse, target_is_logvar=False, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 28
Features for y: 2

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
[ITransformerWrapper] Using device: mps
Epoch 1/50  Train 1087.3472  Val 21.5468  4.5s
Epoch 2/50  Train 839.3814  Val 14.0376  4.5s
Epoch 3/50  Train 827.5597  Val 1.2781  4.3s
Epoch 4/50  Train 786.9254  Val 1.3413  4.4s
Epoch 5/50  Train 823.9570  Val 2.9974  4.5s
Epoch 6/50  Train 792.4536  Val 4.5769  4.6s
Epoch 7/50  Train 801.2790  Val 93.4962  4.6s
Epoch 8/50  Train 785.3818  Val 10.1032  4.8s
Epoch 9/50  Train 803.2856  Val 6.6489  4.6s
Epoch 10/50  Train 816.0592  Val 26.5699  4.9s
Epoch 11/50  Train 816.8220  Val 14.0419  4.4s
Epoch 12/50  Train 839.0234  Val 17.0919  4.5s
Epoch 13/50  Train 813.6907  Val 10.0767  4.5s
Epoch 14/50  Train 783.8769  Val 9.8195  4.7s
Epoch 15/50  Train 796.6099  Val 9.9969  4.6s
Epoch 16/50  Train 792.6564  Val 6.8186  4.4s
Epoch 17/50  Train 800.0370  Val 12.0580  4.4s
Epoch 18/50  Train 822.7716  Val 24.0334  4.4s
Epoch 19/50  Train 814.3003  Val 25.8565  4.5s
Epoch 20/50  Train 853.4293  Val 17.3778  4.4s
Epoch 21/50  Train 820.1548  Val 3.1449  4.4s
Epoch 22/50  Train 809.1714  Val 58.2582  4.4s
Epoch 23/50  Train 812.9411  Val 11.4833  4.4s
Epoch 24/50  Train 815.5600  Val 14.2370  4.4s
Epoch 25/50  Train 822.2740  Val 28.5312  4.4s
Epoch 26/50  Train 1223.1283  Val 19.2804  4.5s
Epoch 27/50  Train 845.5810  Val 1.4031  4.4s
Epoch 28/50  Train 839.8259  Val 3.0496  4.4s
Epoch 29/50  Train 810.6021  Val 2.3458  4.4s
Epoch 30/50  Train 815.3922  Val 2.0710  4.5s
Epoch 31/50  Train 811.9153  Val 3.2759  4.6s
Epoch 32/50  Train 809.6778  Val 2.0042  4.4s
Epoch 33/50  Train 820.3989  Val 4.5912  4.6s
Epoch 34/50  Train 802.8380  Val 2.6348  4.5s
Epoch 35/50  Train 806.8375  Val 4.6025  4.7s
Epoch 36/50  Train 993.5795  Val 21.0750  4.4s
Epoch 37/50  Train 803.9765  Val 1.4148  5.4s
Epoch 38/50  Train 836.5816  Val 11.2053  4.4s
Epoch 39/50  Train 825.2896  Val 6.7030  4.4s
Epoch 40/50  Train 807.9391  Val 4.2100  4.4s
Epoch 41/50  Train 821.0650  Val 40.5931  4.4s
Epoch 42/50  Train 823.2032  Val 6.7066  4.4s
Epoch 43/50  Train 813.2474  Val 6.5751  4.4s
Epoch 44/50  Train 799.8728  Val 24.1139  4.4s
Epoch 45/50  Train 862.9803  Val 10.8057  4.4s
Epoch 46/50  Train 833.0010  Val 25.8066  4.4s
Epoch 47/50  Train 806.2162  Val 4.6622  4.4s
Epoch 48/50  Train 799.4888  Val 4.7479  4.4s
Epoch 49/50  Train 807.3868  Val 5.6666  4.4s
Epoch 50/50  Train 821.4557  Val 7.7336  4.4s
Early stopping triggered.

Best parameters found in single holdout (grid):
  d_ff: 256
  d_model: 64
  e_layers: 2
  lr: 0.005
  n_heads: 4

Refitting with selected hyperparameters on (train_core + train_val), new early-stop split from that union…
[ITransformerWrapper] Using device: mps
Epoch 1/50  Train 778.4017  Val 52.3412  4.4s
Epoch 2/50  Train 809.2608  Val 32.2755  4.4s
Epoch 3/50  Train 809.3679  Val 11.0243  4.4s
Epoch 4/50  Train 788.7047  Val 4.0465  4.5s
Epoch 5/50  Train 800.6716  Val 10.4544  4.5s
Epoch 6/50  Train 818.3347  Val 6.2930  4.4s
Epoch 7/50  Train 808.3282  Val 5.6543  4.5s
Epoch 8/50  Train 840.8377  Val 18.9487  4.4s
Epoch 9/50  Train 807.4513  Val 47.9825  4.5s
Epoch 10/50  Train 817.5101  Val 7.9060  4.5s
Epoch 11/50  Train 838.2396  Val 14.5890  4.4s
Epoch 12/50  Train 831.4466  Val 60.9383  4.3s
Epoch 13/50  Train 836.1850  Val 81.6192  4.4s
Epoch 14/50  Train 824.2687  Val 2.9463  4.4s
Epoch 15/50  Train 824.6539  Val 8.1547  4.4s
Epoch 16/50  Train 811.4833  Val 6.9473  4.3s
Epoch 17/50  Train 790.3770  Val 30.9246  4.4s
Epoch 18/50  Train 840.1710  Val 34.9243  4.4s
Epoch 19/50  Train 797.1199  Val 3.2593  4.4s
Epoch 20/50  Train 797.9213  Val 8.3437  4.4s
Epoch 21/50  Train 824.5640  Val 10.6512  4.4s
Epoch 22/50  Train 862.3645  Val 17.7547  4.6s
Epoch 23/50  Train 836.8259  Val 84.5915  4.4s
Epoch 24/50  Train 840.4047  Val 20.4057  4.3s
Epoch 25/50  Train 832.3700  Val 4.0024  4.3s
Epoch 26/50  Train 827.1364  Val 13.5759  4.4s
Epoch 27/50  Train 813.4445  Val 2.1575  4.3s
Epoch 28/50  Train 772.1198  Val 1.4681  4.4s
Epoch 29/50  Train 778.8743  Val 8.0640  4.4s
Epoch 30/50  Train 838.9962  Val 17.0537  4.3s
Epoch 31/50  Train 904.9647  Val 14.0029  4.3s
Epoch 32/50  Train 822.8615  Val 2.8273  4.4s
Epoch 33/50  Train 829.9406  Val 9.5769  4.3s
Epoch 34/50  Train 822.4383  Val 3.2647  4.3s
Epoch 35/50  Train 832.1558  Val 3.3220  4.4s
Epoch 36/50  Train 828.5571  Val 59.3234  4.3s
Epoch 37/50  Train 791.0894  Val 20.7131  4.3s
Epoch 38/50  Train 772.4258  Val 4.2178  4.3s
Epoch 39/50  Train 813.5990  Val 8.5702  4.3s
Epoch 40/50  Train 853.4989  Val 30.4546  4.4s
Epoch 41/50  Train 847.3138  Val 17.6805  4.4s
Epoch 42/50  Train 826.3270  Val 1.6980  4.3s
Epoch 43/50  Train 809.1862  Val 8.2035  4.4s
Epoch 44/50  Train 817.0469  Val 65.8593  4.4s
Epoch 45/50  Train 826.5142  Val 3.9596  4.4s
Epoch 46/50  Train 802.7560  Val 20.2774  4.3s
Epoch 47/50  Train 809.0999  Val 4.7626  4.3s
Epoch 48/50  Train 796.9779  Val 5.4513  4.4s
Epoch 49/50  Train 809.6859  Val 2.9171  4.6s
Epoch 50/50  Train 827.8608  Val 15.3343  4.4s
Early stopping triggered.

Results for the refit model (single holdout):

--- Task 1 ---
1 day(s) MAE                       : 17.54991214
1 day(s) RMSE                      : 83.33589416
1 day(s) R2                        : -152154.10950240
1 day(s) Pearson r                 : 0.25075678
1 day(s) QLIKE                     : 1.56570299
3 day(s) MAE                       : 17.44675449
3 day(s) RMSE                      : 82.82175523
3 day(s) R2                        : -151441.87058045
3 day(s) Pearson r                 : 0.23234911
3 day(s) QLIKE                     : 1.57260817
5 day(s) MAE                       : 17.43576307
5 day(s) RMSE                      : 82.77497175
5 day(s) R2                        : -152989.81145626
5 day(s) Pearson r                 : 0.20546074
5 day(s) QLIKE                     : 1.58593820
10 day(s) MAE                      : 17.16006614
10 day(s) RMSE                     : 81.40399678
10 day(s) R2                       : -149376.31596726
10 day(s) Pearson r                : 0.18169619
10 day(s) QLIKE                    : 1.63377881
20 day(s) MAE                      : 16.75686248
20 day(s) RMSE                     : 79.41041311
20 day(s) R2                       : -143825.94365717
20 day(s) Pearson r                : 0.14143044
20 day(s) QLIKE                    : 1.65560892
full horizon MAE                   : 16.70763077
full horizon RMSE                  : 79.14899050
full horizon R2                    : -143655.09126308
full horizon Pearson r             : 0.12965748
full horizon QLIKE                 : 1.68016603

--- Task 2 ---
1 day(s) MAE                       : 0.23288420
1 day(s) RMSE                      : 0.71200473
1 day(s) R2                        : -70.02431082
1 day(s) Pearson r                 : 0.41471879
1 day(s) QLIKE                     : 0.40409938
3 day(s) MAE                       : 0.23349916
3 day(s) RMSE                      : 0.71192313
3 day(s) R2                        : -70.19015558
3 day(s) Pearson r                 : 0.41606060
3 day(s) QLIKE                     : 0.40501343
5 day(s) MAE                       : 0.23378195
5 day(s) RMSE                      : 0.71198999
5 day(s) R2                        : -70.04047294
5 day(s) Pearson r                 : 0.41474383
5 day(s) QLIKE                     : 0.40456905
10 day(s) MAE                      : 0.23424545
10 day(s) RMSE                     : 0.71219872
10 day(s) R2                       : -69.78576030
10 day(s) Pearson r                : 0.41107686
10 day(s) QLIKE                    : 0.40628523
20 day(s) MAE                      : 0.23370814
20 day(s) RMSE                     : 0.71166203
20 day(s) R2                       : -70.83710231
20 day(s) Pearson r                : 0.42000434
20 day(s) QLIKE                    : 0.40320620
full horizon MAE                   : 0.23277048
full horizon RMSE                  : 0.71083669
full horizon R2                    : -71.24617445
full horizon Pearson r             : 0.43110690
full horizon QLIKE                 : 0.40069991

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00754228, max=917.637

Best single-holdout (refit) model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/test_ITransformer_model.pkl

Run All¶

Call drafts¶

Do not run these drafts directly; executing all of them would take a very long time.

In [ ]:
import os

# Shared forecasting horizon for all draft calls below.
# Bug fix: the original line ended with a trailing comma ("time_horizon=60,"),
# which made this a 1-tuple (60,) instead of the int 60 that each
# train_and_evaluate_model call receives via time_horizon=time_horizon.
time_horizon = 60

# --- Draft: Simple MLP -------------------------------------------------------
save_MLP = os.path.join(
    root_folder,
    objects_relative_path,
    "MLP_model.pkl"
)

param_grid_MLP = {
    "lr": [5e-4, 5e-3],
    "dropout": [0, 0.1],
    "l2_weight": [1e-5, 1e-4],
    "batch_size": [128, 512],
    "hidden_layers": [2, 4],
    "hidden_dim": [32, 64]
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_MLP,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_MLP",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,
    
    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=time_horizon,

    # these have to be adjusted for some models
    patience=10,
    epochs=50,
    min_epochs=30,

    # saving model
    save_model_path=save_MLP,

    # these go into the parameter grid
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,
    
    hidden_layers=3,
    hidden_dim=256
)


# --- Draft: Simple LSTM ------------------------------------------------------
save_LSTM = os.path.join(
    root_folder,
    objects_relative_path,
    "LSTM_model.pkl"
)

param_grid_LSTM = {
    "lr": [5e-4, 5e-3],
    "dropout": [0, 0.1],
    "l2_weight": [1e-5, 1e-4],
    "batch_size": [16, 32],
    "hidden_layers": [2, 4],
    "hidden_dim": [32, 64]
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_LSTM,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_LSTM",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,

    # specify these for each data
    no_tasks=1,
    flatten=False,
    time_horizon=time_horizon,

    # these have to be adjusted for some models
    patience=10,
    epochs=50,
    min_epochs=30,

    # saving model
    save_model_path=save_LSTM,

    # these go into the parameter grid
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,
    
    hidden_layers=2,
    hidden_dim=16
)


# --- Draft: Simple KAN -------------------------------------------------------
save_KAN = os.path.join(
    root_folder,
    objects_relative_path,
    "KAN_model.pkl"
)

param_grid_KAN = {
    "lr": [5e-4, 5e-3],
    "dropout": [0, 0.1],
    "l2_weight": [1e-5, 1e-4],
    "batch_size": [256, 512],
    "hidden_layers": [1, 2],
    "hidden_dim": [16, 32],
    "knots": [5, 8],
    "spline_power": [3, 5]
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_KAN,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Simple_KAN",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,

    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=time_horizon,

    # these have to be adjusted for some models
    patience=10,
    epochs=50,
    min_epochs=30,

    # saving model
    save_model_path=save_KAN,

    # these go into the parameter grid
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,
    
    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=12,
    spline_power=7
)


# --- Draft: Custom KAN -------------------------------------------------------
save_Custom_KAN = os.path.join(
    root_folder,
    objects_relative_path,
    "Custom_KAN_model.pkl"
)

param_grid_Custom_KAN = {
    "lr": [5e-4, 5e-3],
    "dropout": [0, 0.1],
    "l2_weight": [1e-5, 1e-4],
    "batch_size": [256, 512],
    "hidden_layers": [1, 2],
    "hidden_dim": [16, 32],
    "knots": [5, 8],
    "spline_power": [3, 5]
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_Custom_KAN,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Custom_KAN",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,

    # specify these for each data
    no_tasks=1,
    flatten=True,
    time_horizon=time_horizon,

    # these have to be adjusted for some models
    patience=10,
    epochs=50,
    min_epochs=30,

    # saving model
    save_model_path=save_Custom_KAN,

    # these go into the parameter grid
    lr=1e-5,
    dropout=0,
    l2_weight=1e-4,
    batch_size=64,
    
    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=12,
    spline_power=7,

    # additional parameters for Custom KAN
    warmup_aux_epochs=15,
    joint_epochs=15
)



# --- Draft: LSTM + KAN -------------------------------------------------------
save_LSTM_KAN = os.path.join(
    root_folder,
    objects_relative_path,
    "LSTM_KAN_model.pkl"
)

param_grid_LSTM_KAN = {
    "lr": [5e-4, 5e-3],
    "dropout": [0, 0.1],
    "l2_weight": [1e-5, 1e-4],
    "batch_size": [16, 32],
    "hidden_layers": [2, 4],
    "hidden_dim": [32, 64],
    "knots": [5, 8],
    "spline_power": [3, 5]
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_LSTM_KAN,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="LSTM_KAN",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,
    
    # specify these for each data
    no_tasks=1,
    flatten=False,
    time_horizon=time_horizon,

    # these have to be adjusted for some models
    patience=10,
    epochs=50,
    min_epochs=30,

    # saving model
    save_model_path=save_LSTM_KAN,

    # these go into the parameter grid
    lr=5e-3,
    dropout=0,
    l2_weight=1e-5,
    batch_size=32,
    
    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=8,
    spline_power=3
)


# --- Draft: Custom LSTM + KAN ------------------------------------------------
save_Custom_LSTM_KAN = os.path.join(
    root_folder,
    objects_relative_path,
    "Custom_LSTM_KAN_model.pkl"
)

param_grid_Custom_LSTM_KAN = {
    "lr": [5e-4, 5e-3],
    "dropout": [0, 0.1],
    "l2_weight": [1e-5, 1e-4],
    "batch_size": [16, 32],
    "hidden_layers": [2, 4],
    "hidden_dim": [32, 64],
    "knots": [5, 8],
    "spline_power": [3, 5]
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_Custom_LSTM_KAN,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=True,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="Custom_LSTM_KAN",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,
    
    # specify these for each data
    no_tasks=1,
    flatten=False,
    time_horizon=time_horizon,

    # these have to be adjusted for some models
    patience=10,
    epochs=50,
    min_epochs=30,

    # saving model
    save_model_path=save_Custom_LSTM_KAN,

    # these go into the parameter grid
    lr=5e-3,
    dropout=0,
    l2_weight=1e-5,
    batch_size=32,
    
    hidden_layers=2,
    hidden_dim=64,

    # specific to KAN:
    knots=8,
    spline_power=3
)


# --- Draft: TimesNet ---------------------------------------------------------
save_TimesNet = os.path.join(
    root_folder,
    objects_relative_path,
    "TimesNet_model.pkl"
)

param_grid_TimesNet = {
    "lr": [5e-4, 5e-3],
    "l2_weight": [5e-5, 5e-4],
    "batch_size": [256, 512],
    "d_model": [64, 128],
    "d_ff": [128, 256],
    "n_heads": [4, 6],
    "e_layers": [2, 4],
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_TimesNet,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="TimesNet",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,
    
    # specify these for each data
    no_tasks=eur4_y.shape[2],
    flatten=False,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    epochs = 1,
    min_epochs=50,

    # saving model
    save_model_path=save_TimesNet,

    # these go into the parameter grid
    lr=1e-3,
    dropout=0,
    l2_weight=1e-5,
    batch_size=4,
    
    # specific to TimesNet
    d_model=64,
    d_ff=256,
    n_heads=4,
    e_layers=2
)


# --- Draft: iTransformer -----------------------------------------------------
save_ITransformer = os.path.join(
    root_folder,
    objects_relative_path,
    "ITransformer_model.pkl"
)

param_grid_ITransformer = {
    "lr": [5e-4, 5e-3],
    "l2_weight": [5e-5, 5e-4],
    "batch_size": [256, 512],
    "d_model": [64, 128],
    "d_ff": [128, 256],
    "n_heads": [4, 6],
    "e_layers": [2, 4],
}

res, nes_res, best_m, best_par, y_data  = train_and_evaluate_model(

    param_grid=param_grid_ITransformer,

    verbose=True,
    use_nested_cv=False,
    single_holdout=True,

    merge_price_time=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,

    # some models might not need this
    min_delta=1e-4,

    # model name for each
    model_type="ITransformer",

    # data for each scenario
    X_price=eur4_X_price,
    X_time=eur4_X_time,
    y=eur4_y,
    
    # specify these for each data
    no_tasks=eur4_y.shape[2],
    flatten=False,
    time_horizon=28,

    # these have to be adjusted for some models
    patience=2,
    epochs = 1,
    min_epochs=50,

    # saving model
    save_model_path=save_ITransformer,

    # these go into the parameter grid
    lr=1e-3,
    dropout=0,
    l2_weight=1e-5,
    batch_size=4,
    
    # specific to ITransformer
    d_model=64,
    d_ff=256,
    n_heads=4,
    e_layers=2
)

Data load example¶

In [362]:
# Reload the preprocessed 8-feature data bundle from disk.
load_data_object_8_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_8.pkl")

with open(load_data_object_8_file_path, "rb") as f:
    structured_data_dict_8 = pickle.load(f)

print("Data dictionary 8 loaded successfully.")

# Pull the EURUSD arrays out of the bundle (single lookup, then .get per key).
_eur8_entry = structured_data_dict_8["EURUSD"]
eur8_X_price = _eur8_entry.get("X_other")
eur8_X_time = _eur8_entry.get("X_time")
eur8_y = _eur8_entry.get("y")
eur8_y_columns = _eur8_entry.get("y_columns")
eur8_X_columns = _eur8_entry.get("X_other_columns")

# Last expression: rich display of the price-array shape.
eur8_X_price.shape
Data dictionary 8 loaded successfully.
Out[362]:
(3782, 60, 1)

Dictionaries¶

In [326]:
# Assets and forecast horizons for the single-configuration experiments.
tickers = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
horizons = [60, 20, 1]

# Models covered by this sweep.
model_list = [
    "Simple_MLP",
    "Simple_LSTM",
    "Simple_KAN",
    "LSTM_KAN",
    "TimesNet",
    "ITransformer",
]

# Single-candidate grids: every knob has exactly one value, so the "grid
# search" degenerates to one fixed configuration per model.
param_grids = {
    "Simple_MLP": dict(
        lr=[5e-3], dropout=[0], l2_weight=[1e-5],
        batch_size=[512], hidden_layers=[3], hidden_dim=[64],
    ),
    "Simple_LSTM": dict(
        lr=[5e-3], dropout=[0], l2_weight=[1e-5],
        batch_size=[16], hidden_layers=[4], hidden_dim=[64],
    ),
    "Simple_KAN": dict(
        lr=[5e-3], dropout=[0], l2_weight=[1e-5],
        batch_size=[512], hidden_layers=[4], hidden_dim=[32],
        knots=[8], spline_power=[5],
    ),
    "LSTM_KAN": dict(
        lr=[5e-4], dropout=[0], l2_weight=[1e-5],
        batch_size=[16], hidden_layers=[2], hidden_dim=[64],
        knots=[8], spline_power=[5],
    ),
    "TimesNet": dict(
        lr=[5e-3], l2_weight=[5e-5], batch_size=[512],
        d_model=[128], d_ff=[256], n_heads=[4], e_layers=[4],
    ),
    "ITransformer": dict(
        lr=[5e-3], l2_weight=[5e-5], batch_size=[512],
        d_model=[128], d_ff=[256], n_heads=[4], e_layers=[4],
    ),
}

Complete dictionaries¶

In [327]:
# NOTE(review): the chained assignment below also rebinds the global
# `model_list` (defined in an earlier cell with only 6 entries) to this
# longer 8-entry list — confirm this is intentional (i.e. subsequent cells
# should run the Custom_* variants too); if not, drop the `model_list =` part.
complete_models = model_list = [
    "Simple_MLP",
    "Simple_LSTM",
    "Simple_KAN",
    "Custom_KAN",
    "LSTM_KAN",
    "Custom_LSTM_KAN",
    "TimesNet",
    "ITransformer",
]

# Full two-candidate grids per model, used for the exhaustive searches.
# MLP/LSTM/KAN families share lr/dropout/l2/batch/depth/width knobs; the
# KAN variants add spline knobs; TimesNet/ITransformer use backbone knobs.
complete_param_grids = {
    "Simple_MLP": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[128, 512], hidden_layers=[2, 4], hidden_dim=[32, 64],
    ),
    "Simple_LSTM": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[16, 32], hidden_layers=[2, 4], hidden_dim=[32, 64],
    ),
    "Simple_KAN": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[256, 512], hidden_layers=[1, 2], hidden_dim=[16, 32],
        knots=[5, 8], spline_power=[3, 5],
    ),
    "Custom_KAN": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[256, 512], hidden_layers=[1, 2], hidden_dim=[16, 32],
        knots=[5, 8], spline_power=[3, 5],
    ),
    "LSTM_KAN": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[16, 32], hidden_layers=[2, 4], hidden_dim=[32, 64],
        knots=[5, 8], spline_power=[3, 5],
    ),
    "Custom_LSTM_KAN": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[16, 32], hidden_layers=[2, 4], hidden_dim=[32, 64],
        knots=[5, 8], spline_power=[3, 5],
    ),
    "TimesNet": dict(
        lr=[5e-4, 5e-3], l2_weight=[5e-5, 5e-4], batch_size=[256, 512],
        d_model=[64, 128], d_ff=[128, 256], n_heads=[4, 6], e_layers=[2, 4],
    ),
    "ITransformer": dict(
        lr=[5e-4, 5e-3], l2_weight=[5e-5, 5e-4], batch_size=[256, 512],
        d_model=[64, 128], d_ff=[128, 256], n_heads=[4, 6], e_layers=[2, 4],
    ),
}

Left out dictionaries¶

In [328]:
# KAN variants excluded from the main sweep, with their full grids.
model_list_left_out = [
    "Custom_KAN",
    "Custom_LSTM_KAN",
]

# NOTE(review): the name below contains a typo ("lest" for "left"); it is
# kept unchanged because other cells may already reference it.
param_grids_lest_out = {
    "Custom_KAN": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[256, 512], hidden_layers=[1, 2], hidden_dim=[16, 32],
        knots=[5, 8], spline_power=[3, 5],
    ),
    "Custom_LSTM_KAN": dict(
        lr=[5e-4, 5e-3], dropout=[0, 0.1], l2_weight=[1e-5, 1e-4],
        batch_size=[16, 32], hidden_layers=[2, 4], hidden_dim=[32, 64],
        knots=[5, 8], spline_power=[3, 5],
    ),
}

Routine to run all models¶

In [133]:
import os
from collections import defaultdict
from sklearn.model_selection import ParameterGrid
import inspect

# Universe for the batch runs below.
# NOTE(review): these rebind the tickers/horizons defined in earlier cells
# (MSFT/GE/C and horizon 60 are dropped here).
tickers  = ["AAPL", "BAC", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
horizons = [20, 1, 3, 5, 10]

model_list = [
    "Simple_MLP", "Simple_LSTM", "Simple_KAN",
    "LSTM_KAN", "TimesNet", "ITransformer"
]

# Fixed hyper-parameters per model for the simple (non-grid) fits; each dict
# also carries the training budget (epochs/patience/min_epochs/min_delta).
FIXED_PARAMS = {
    "Simple_MLP": {
        "lr": 5e-3, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 64,
        "hidden_layers": 3, "hidden_dim": 512,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "Simple_LSTM": {
        "lr": 5e-3, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 16,
        "hidden_layers": 4, "hidden_dim": 128,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "Simple_KAN": {
        "lr": 5e-3, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 128,
        "hidden_layers": 3, "hidden_dim": 128, "knots": 8, "spline_power": 5,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "LSTM_KAN": {
        "lr": 5e-4, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 16,
        "hidden_layers": 2, "hidden_dim": 128, "knots": 8, "spline_power": 5,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "TimesNet": {
        "lr": 5e-3, "l2_weight": 5e-5, "batch_size": 512,
        "d_model": 128, "d_ff": 256, "n_heads": 4, "e_layers": 4, "dropout": 0.0,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "ITransformer": {
        "lr": 5e-3, "l2_weight": 5e-5, "batch_size": 512,
        "d_model": 128, "d_ff": 256, "n_heads": 4, "e_layers": 4, "dropout": 0.0,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
}

# Input wiring per model: MLP/KAN take flattened vectors, the sequence
# models keep the (samples, timesteps, features) layout.
MODEL_IO = {
    "Simple_MLP": {"merge_price_time": False, "flatten": True},
    "Simple_KAN": {"merge_price_time": False, "flatten": True},
    "Simple_LSTM": {"merge_price_time": False, "flatten": False},
    "LSTM_KAN": {"merge_price_time": False, "flatten": False},
    "TimesNet": {"merge_price_time": False, "flatten": False},
    "ITransformer": {"merge_price_time": False, "flatten": False},
}

# Flags shared by every run.
COMMON = {
    "use_nested_cv": False,
    "single_holdout": False,
    "normalize_X": True,
    "normalize_Time": True,
    "normalize_y": True,
    "verbose": True,
}

# Models run with one task per y-channel (no_tasks = y.shape[2]).
MULTI_TASK_MODELS = {"TimesNet", "ITransformer"}

# Root directory for per-run pickles; one subfolder per ticker is created later.
BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "runs")
# Nested store: results_store[ticker][horizon][model] -> run bundle dict.
results_store = defaultdict(lambda: defaultdict(dict))


def filter_kwargs_for_callable(fn, d):
    """Return the subset of ``d`` whose keys are parameter names of ``fn``.

    Note: keys are matched against the literal parameter names only, so a
    ``**kwargs`` catch-all in ``fn`` does not admit arbitrary extra keys.
    """
    accepted = inspect.signature(fn).parameters
    filtered = {}
    for key, value in d.items():
        if key in accepted:
            filtered[key] = value
    return filtered

def run_all_models_for_all(
    *,
    tickers,
    horizons,
    model_list,
    X_price_map,
    X_time_map,
    y_map,
    base_save_dir,
    fixed_params,
    common_params,
    model_io,
    multitask_models,
    param_grids=None,
    grid_tickers=None,
    grid_horizons=None,
    results_store=None,
    overwrite=True,
    run_grid_with_single_holdout=None,
):
    """Run train_and_evaluate_model for every (ticker, horizon, model) triple.

    For each combination it does either a simple fit with the model's fixed
    hyper-parameters, or — when ``run_grid_with_single_holdout`` is truthy —
    a grid search over ``param_grids[model_name]`` with a single holdout
    split.  Results and the fitted model are collected in
    ``results_store[ticker][horizon][model_name]`` and the model is pickled
    to ``base_save_dir/<ticker>/<model_name>_H<horizon>.pkl``.

    Tickers missing from the data maps, and models missing from
    ``model_io``/``fixed_params``, are skipped with a message when verbose.

    NOTE(review): ``grid_tickers`` and ``grid_horizons`` are accepted but
    never read inside the function — confirm whether per-ticker grid
    selection was intended.

    Returns the (possibly freshly created) nested ``results_store`` dict.
    """

    # Fresh nested store unless the caller supplied one to append to.
    if results_store is None:
        results_store = defaultdict(lambda: defaultdict(dict))

    param_grids = param_grids or {}
    os.makedirs(base_save_dir, exist_ok=True)
    verbose = bool(common_params.get("verbose", False))

    # Truthy sentinel switches every run into grid + single-holdout mode.
    force_grid = bool(run_grid_with_single_holdout)

    for ticker in tickers:
        if ticker not in X_price_map or ticker not in y_map:
            if verbose:
                print(f"[skip] Missing data for ticker: {ticker}")
            continue

        Xp = X_price_map[ticker]
        Xt = X_time_map.get(ticker, None)
        Y  = y_map[ticker]

        for horizon in horizons:
            for model_name in model_list:
                if model_name not in model_io:
                    if verbose: print(f"[skip] MODEL_IO missing for model: {model_name}")
                    continue
                if model_name not in fixed_params:
                    if verbose: print(f"[skip] FIXED_PARAMS missing for model: {model_name}")
                    continue

                io     = model_io[model_name]
                fixed  = dict(fixed_params[model_name])  # copy: merged below mutates it
                # Multi-task models predict all y-channels; others a single task.
                ntasks = (Y.shape[2] if model_name in multitask_models else 1)

                t_dict = results_store.setdefault(ticker, {})
                h_dict = t_dict.setdefault(horizon, {})

                if (not overwrite) and (model_name in h_dict):
                    if verbose:
                        print(f"[skip] {ticker} | H={horizon} | {model_name} (exists, overwrite=False)")
                    continue


                if force_grid:
                    single_holdout = True
                    grid = param_grids.get(model_name, None)
                else:
                    single_holdout = False
                    grid = None

                save_path = os.path.join(base_save_dir, f"{ticker}", f"{model_name}_H{horizon}.pkl")
                os.makedirs(os.path.dirname(save_path), exist_ok=True)

                if verbose:
                    mode_note = " (grid + single-holdout)" if single_holdout else " (simple fit)"
                    print(f"\n=== {ticker} | H={horizon} | {model_name}{mode_note} | no_tasks={ntasks} ===")

                # Merge common flags with the model's fixed params, then strip
                # every key that is passed explicitly to the call below so no
                # keyword is supplied twice.
                merged = dict(common_params)
                merged.update(fixed)
                for k in (
                    "single_holdout", "param_grid", "use_nested_cv",
                    "no_tasks", "time_horizon", "merge_price_time", "flatten"
                ):
                    merged.pop(k, None)

                if force_grid:
                    merged["use_nested_cv"] = False

                # Drop any kwarg the trainer's signature does not accept.
                merged = filter_kwargs_for_callable(train_and_evaluate_model, merged)

                # NOTE(review): the `nested` return value is never stored below.
                results, nested, best_model, used_params, y_data = train_and_evaluate_model(
                    model_type=model_name,
                    X_price=Xp,
                    X_time=Xt,
                    y=Y,
                    no_tasks=ntasks,
                    merge_price_time=io["merge_price_time"],
                    flatten=io["flatten"],
                    time_horizon=horizon,
                    save_model_path=save_path,
                    single_holdout=single_holdout,
                    param_grid=grid,
                    **merged
                )

                h_dict[model_name] = {
                    "results": results,
                    "model": best_model,
                    "used_params": used_params,
                    "y_data": y_data,
                    "save_path": save_path,
                    "grid_used": single_holdout,
                }

    return results_store

# Reload the 8-feature data bundle used by the batch runner.
load_data_object_8_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_8.pkl")

with open(load_data_object_8_file_path, "rb") as f:
    structured_data_dict_8 = pickle.load(f)

print("Data dictionary 8 loaded successfully.")


# Per-ticker array maps consumed by run_all_models_for_all.
X_price_map, X_time_map, y_map = {}, {}, {}

missing = []
for t in tickers:
    if t not in structured_data_dict_8:
        missing.append(t)
        continue

    entry = structured_data_dict_8[t]

    # "X_other" carries the price features; X_time is optional per ticker.
    Xp = entry.get("X_other", None)
    Xt = entry.get("X_time",  None)
    Y  = entry.get("y",       None)

    if Xp is None or Y is None:
        print(f"[WARN] {t}: missing {'X_other' if Xp is None else ''}{' and ' if (Xp is None and Y is None) else ''}{'y' if Y is None else ''} → skipping.")
        continue

    # Coerce to float ndarrays before shape validation.
    Xp = np.asarray(Xp, dtype=float)
    Xt = None if Xt is None else np.asarray(Xt, dtype=float)
    Y  = np.asarray(Y,  dtype=float)

    # All arrays are expected to be 3-D (samples, timesteps, features/channels).
    if Xp.ndim != 3 or Y.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_other={Xp.shape}, y={Y.shape} → skipping.")
        continue
    if Xt is not None and Xt.ndim != 3:
        # Bad time features are dropped rather than failing the ticker.
        print(f"[WARN] {t}: unexpected dims X_time={Xt.shape} → setting to None.")
        Xt = None

    X_price_map[t] = Xp
    X_time_map[t]  = Xt
    y_map[t]       = Y

if missing:
    print(f"[INFO] Missing tickers in data dict (skipped): {missing}")


# NOTE(review): with the tickers list defined in the routine cell above
# (which contains no "MSFT"), this debug print never fires — confirm the
# intended example ticker.
if "MSFT" in y_map:
    print("MSFT shapes:",
          "X_other", X_price_map["MSFT"].shape,
          "X_time",  None if X_time_map["MSFT"] is None else X_time_map["MSFT"].shape,
          "y",       y_map["MSFT"].shape)
Data dictionary 8 loaded successfully.
In [358]:
X_price_map["EURUSD"].shape
Out[358]:
(3782, 60, 1)

Helper to run the GARCH baselines and merge their results with the ML ones¶

In [134]:
import os
import pickle
import numpy as np

def run_garch_over_dict(
    structured_data_dict,
    tickers,
    horizons,
    results_dict,
    *,

    x_price_key="X_other",
    y_key="y",

    anchor_step=55,
    anchor_feature_idx=0,
    y_feature_idx=0,

    split_ratio=0.8,
    roll_window=250,
    mean_mode="Zero",
    candidates=None,
    sim_paths=2000,
    metric_horizons=(1, 3, 5, 10, 20, -1),

    verbose=True,
    save_dir=None,
):
    """Evaluate GARCH baselines per ticker and merge them into ``results_dict``.

    For each ticker it slices one anchor value per sample from
    ``X_price[:, anchor_step, anchor_feature_idx]`` and the forward target
    sequence ``y[:, :, y_feature_idx]``, then delegates fitting/evaluation to
    ``evaluate_vol_on_anchors_rolling_split`` (defined elsewhere in the
    notebook) for every horizon in ``horizons``.  Results are stored under
    ``results_dict[ticker][H]["GARCH"]`` with the same key layout as the ML
    model bundles, and an optional lightweight summary is pickled per
    (ticker, H) when ``save_dir`` is given.

    Raises ValueError/IndexError when the arrays are not 3-D or the
    anchor/feature indices are out of bounds.  Returns ``results_dict``.
    """
    def _get_x_price(d):
        # Prefer the configured key; fall back to the legacy "X_price" key.
        x = d.get(x_price_key)
        if x is None:
            x = d.get("X_price")
        return x

    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    for ticker in tickers:
        if ticker not in structured_data_dict:
            if verbose:
                print(f"[GARCH] Skipping {ticker}: not in structured_data_dict.")
            continue

        bundle = structured_data_dict[ticker]
        X_price = _get_x_price(bundle)
        y_all   = bundle.get(y_key)

        if X_price is None or y_all is None:
            if verbose:
                print(f"[GARCH] Skipping {ticker}: missing X or y (keys tried: {x_price_key}/X_price and {y_key}).")
            continue

        # Shape contract: X is (N, T, F), y is (N, H, C).
        if X_price.ndim != 3:
            raise ValueError(f"[GARCH] {ticker}: X_price must be 3D (N, T, F). Got {X_price.shape}.")
        if y_all.ndim != 3:
            raise ValueError(f"[GARCH] {ticker}: y must be 3D (N, H, C). Got {y_all.shape}.")
        if anchor_step >= X_price.shape[1] or anchor_feature_idx >= X_price.shape[2]:
            raise IndexError(f"[GARCH] {ticker}: anchor_step/feature out of bounds for X_price shape {X_price.shape}.")
        if y_feature_idx >= y_all.shape[2]:
            raise IndexError(f"[GARCH] {ticker}: y_feature_idx out of bounds for y shape {y_all.shape}.")


        # One anchor value per sample plus its forward target sequence.
        X_anchors = X_price[:, anchor_step, anchor_feature_idx].astype(float)
        Y_fwd = y_all[:, :, y_feature_idx].astype(float)

        results_dict.setdefault(ticker, {})

        max_h_avail = Y_fwd.shape[1]
        for H in horizons:
            # Skip horizons longer than the stored target sequence.
            if H > max_h_avail:
                if verbose:
                    print(f"[GARCH] {ticker}: skip H={H} (only {max_h_avail} steps available).")
                continue

            if verbose:
                print(f"\n[GARCH] {ticker} — H={H} (step={anchor_step}, x_feat={anchor_feature_idx}, y_feat={y_feature_idx})")

            # Notebook-level helper: rolling-window fit on the train split,
            # forecast evaluation at the requested metric horizons.
            metrics, best_params, _, panel_df, y_pair = evaluate_vol_on_anchors_rolling_split(
                X_anchors=X_anchors,
                Y_fwd=Y_fwd,
                horizon=H,
                split_ratio=split_ratio,
                roll_window=roll_window,
                mean_mode=mean_mode,
                candidates=candidates,
                sim_paths=sim_paths,
                horizons=metric_horizons,
                verbose=verbose,
            )

            # Store under the "GARCH" model key, mirroring the ML bundles
            # (no fitted model object is kept — "model": None).
            results_dict[ticker].setdefault(H, {})
            results_dict[ticker][H]["GARCH"] = {
                "results": metrics,
                "best_params": best_params,
                "model": None,
                "y_data": y_pair,
                "panel": panel_df,
                "params": {
                    "split_ratio": split_ratio,
                    "roll_window": roll_window,
                    "mean_mode": mean_mode,
                    "candidates": candidates,
                    "sim_paths": sim_paths,
                },
            }

            if save_dir:
                # Persist a lightweight summary only (panel head, not the full panel).
                out_path = os.path.join(save_dir, f"GARCH_{ticker}_H{H}.pkl")
                to_save = {
                    "metrics": metrics,
                    "panel_head": panel_df.head(10),
                    "y_data_shape": y_pair.shape,
                    "params": {
                        "split_ratio": split_ratio,
                        "roll_window": roll_window,
                        "mean_mode": mean_mode,
                        "candidates": candidates,
                        "sim_paths": sim_paths,
                    },
                }
                try:
                    with open(out_path, "wb") as f:
                        pickle.dump(to_save, f)
                    if verbose:
                        print(f"[GARCH] Saved summary: {out_path}")
                except Exception as e:
                    # Best-effort save: report the failure but keep sweeping.
                    if verbose:
                        print(f"[GARCH] Warning: could not pickle summary for {ticker}, H={H}: {e}")

    return results_dict

Call to run ML models¶

In [ ]:
# Run the full (ticker x horizon x model) sweep; artefacts go to a dedicated
# folder so they do not collide with the earlier "runs" directory.
BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "runs_timeseries")
os.makedirs(BASE_SAVE_DIR, exist_ok=True)

results_store = run_all_models_for_all(
    tickers=tickers,
    horizons=horizons,
    model_list=model_list,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=complete_param_grids,
    grid_tickers=tickers,
    grid_horizons=horizons,
    results_store=None,
    overwrite=True,
)

print("\nAll runs completed. Example access:")
# Fix: the previous example used ("MSFT", 60), which appears in neither the
# configured tickers nor horizons, so the guarded print below could never
# fire. Use a combination that is actually produced by the sweep.
ex_ticker, ex_H, ex_model = "AAPL", 20, "Simple_MLP"
if ex_ticker in results_store and ex_H in results_store[ex_ticker] and ex_model in results_store[ex_ticker][ex_H]:
    bundle = results_store[ex_ticker][ex_H][ex_model]
    print(f"- {ex_ticker} H={ex_H} {ex_model} metrics keys:", list(bundle["results"].keys())[:5], "...")
    print(f"- saved model path: {bundle['save_path']}")

Call to run GARCH extension¶

In [ ]:
# NOTE(review): these two assignments rebind the globals but are ignored by
# the call below, which passes hard-coded ticker/horizon lists instead —
# either pass them through or drop the assignments.
tickers = ["AAPL", "BAC", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
horizons = [20, 1, 3, 5, 10]

# Quick GARCH(1,1)-t run on EURUSD/GOLD only, with sim_paths=0 and no saving.
# NOTE(review): the captured output below this cell is from an earlier run
# (anchor_step=55, AAPL/BAC, save_dir set) and does not match these arguments.
all_results = run_garch_over_dict(
    structured_data_dict=structured_data_dict_8,
    tickers=["EURUSD","GOLD"],
    horizons=[1,5,20],
    results_dict={},
    anchor_step=20,
    anchor_feature_idx=0,
    y_feature_idx=0,
    split_ratio=0.8,
    roll_window=250,
    mean_mode="Zero",
    candidates=[("GARCH", 1, 1, "t")],
    sim_paths=0,
    metric_horizons=(1,5,20,-1),
    verbose=True,
    save_dir=None
)
[GARCH] AAPL — horizon=20  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=20
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=-6221.349

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.005131
  alpha[1]: 0.000000
  alpha[2]: 0.000932
  beta[1]: 0.000003
  beta[2]: 0.101618
  nu: 2.366449

[metrics]

1 day(s)
MAE       :  1247.361545
RMSE      :  1247.391571
R2        : -20830.404952
Pearson r :    -0.057371
QLIKE     :     0.535415

3 day(s)
MAE       :  1247.598175
RMSE      :  1247.629979
R2        : -19695.647025
Pearson r :    -0.010520
QLIKE     :     0.553156

5 day(s)
MAE       :  1247.856658
RMSE      :  1247.888798
R2        : -19486.389292
Pearson r :    -0.004070
QLIKE     :     0.556909

10 day(s)
MAE       :  1248.069276
RMSE      :  1248.101633
R2        : -19332.684614
Pearson r :    -0.000541
QLIKE     :     0.559523

20 day(s)
MAE       :  1248.151050
RMSE      :  1248.183505
R2        : -19252.970840
Pearson r :     0.001326
QLIKE     :     0.559622

full horizon
MAE       :  1248.151050
RMSE      :  1248.183505
R2        : -19252.970840
Pearson r :     0.001326
QLIKE     :     0.559622
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_AAPL_H20.pkl

[GARCH] AAPL — horizon=1  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=1
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=-6221.349

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.005131
  alpha[1]: 0.000000
  alpha[2]: 0.000932
  beta[1]: 0.000003
  beta[2]: 0.101618
  nu: 2.366449

[metrics]

1 day(s)
MAE       :  1247.361545
RMSE      :  1247.391571
R2        : -20830.404952
Pearson r :    -0.057371
QLIKE     :     0.535415

full horizon
MAE       :  1247.361545
RMSE      :  1247.391571
R2        : -20830.404952
Pearson r :    -0.057371
QLIKE     :     0.535415
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_AAPL_H1.pkl

[GARCH] AAPL — horizon=3  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=3
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=-6221.349

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.005131
  alpha[1]: 0.000000
  alpha[2]: 0.000932
  beta[1]: 0.000003
  beta[2]: 0.101618
  nu: 2.366449

[metrics]

1 day(s)
MAE       :  1247.361545
RMSE      :  1247.391571
R2        : -20830.404952
Pearson r :    -0.057371
QLIKE     :     0.535415

3 day(s)
MAE       :  1247.598175
RMSE      :  1247.629979
R2        : -19695.647025
Pearson r :    -0.010520
QLIKE     :     0.553156

full horizon
MAE       :  1247.598175
RMSE      :  1247.629979
R2        : -19695.647025
Pearson r :    -0.010520
QLIKE     :     0.553156
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_AAPL_H3.pkl

[GARCH] AAPL — horizon=5  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=5
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 8. The message is:
Positive directional derivative for linesearch
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 8. The message is:
Positive directional derivative for linesearch
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 8. The message is:
Positive directional derivative for linesearch
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 8. The message is:
Positive directional derivative for linesearch
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 8. The message is:
Positive directional derivative for linesearch
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/arch/univariate/base.py:768: ConvergenceWarning: The optimizer returned code 4. The message is:
Inequality constraints incompatible
See scipy.optimize.fmin_slsqp for code meaning.

  warnings.warn(
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=-6221.349

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.005131
  alpha[1]: 0.000000
  alpha[2]: 0.000932
  beta[1]: 0.000003
  beta[2]: 0.101618
  nu: 2.366449

[metrics]

1 day(s)
MAE       :  1247.361545
RMSE      :  1247.391571
R2        : -20830.404952
Pearson r :    -0.057371
QLIKE     :     0.535415

3 day(s)
MAE       :  1247.598175
RMSE      :  1247.629979
R2        : -19695.647025
Pearson r :    -0.010520
QLIKE     :     0.553156

5 day(s)
MAE       :  1247.856658
RMSE      :  1247.888798
R2        : -19486.389292
Pearson r :    -0.004070
QLIKE     :     0.556909

full horizon
MAE       :  1247.856658
RMSE      :  1247.888798
R2        : -19486.389292
Pearson r :    -0.004070
QLIKE     :     0.556909
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_AAPL_H5.pkl

[GARCH] AAPL — horizon=10  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=10
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=-6221.349

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.005131
  alpha[1]: 0.000000
  alpha[2]: 0.000932
  beta[1]: 0.000003
  beta[2]: 0.101618
  nu: 2.366449

[metrics]

1 day(s)
MAE       :  1247.361545
RMSE      :  1247.391571
R2        : -20830.404952
Pearson r :    -0.057371
QLIKE     :     0.535415

3 day(s)
MAE       :  1247.598175
RMSE      :  1247.629979
R2        : -19695.647025
Pearson r :    -0.010520
QLIKE     :     0.553156

5 day(s)
MAE       :  1247.856658
RMSE      :  1247.888798
R2        : -19486.389292
Pearson r :    -0.004070
QLIKE     :     0.556909

10 day(s)
MAE       :  1248.069276
RMSE      :  1248.101633
R2        : -19332.684614
Pearson r :    -0.000541
QLIKE     :     0.559523

full horizon
MAE       :  1248.069276
RMSE      :  1248.101633
R2        : -19332.684614
Pearson r :    -0.000541
QLIKE     :     0.559523
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_AAPL_H10.pkl

[GARCH] BAC — horizon=20  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=20
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=971.679

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.029588
  alpha[1]: 0.421756
  beta[1]: 0.376107
  nu: 7.692989

[metrics]

1 day(s)
MAE       :    18.782275
RMSE      :    71.493605
R2        :   -83.822500
Pearson r :    -0.016550
QLIKE     :     0.585551

3 day(s)
MAE       :    18.238195
RMSE      :    59.861951
R2        :   -58.429355
Pearson r :    -0.021610
QLIKE     :     0.553894

5 day(s)
MAE       :    17.798800
RMSE      :    51.761570
R2        :   -43.420143
Pearson r :    -0.021995
QLIKE     :     0.528120

10 day(s)
MAE       :    17.123131
RMSE      :    40.124812
R2        :   -25.690081
Pearson r :    -0.019958
QLIKE     :     0.496919

20 day(s)
MAE       :    16.538826
RMSE      :    30.848084
R2        :   -14.780593
Pearson r :    -0.016033
QLIKE     :     0.466328

full horizon
MAE       :    16.538826
RMSE      :    30.848084
R2        :   -14.780593
Pearson r :    -0.016033
QLIKE     :     0.466328
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BAC_H20.pkl

[GARCH] BAC — horizon=1  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=1
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=971.679

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.029588
  alpha[1]: 0.421756
  beta[1]: 0.376107
  nu: 7.692989

[metrics]

1 day(s)
MAE       :    18.782275
RMSE      :    71.493605
R2        :   -83.822500
Pearson r :    -0.016550
QLIKE     :     0.585551

full horizon
MAE       :    18.782275
RMSE      :    71.493605
R2        :   -83.822500
Pearson r :    -0.016550
QLIKE     :     0.585551
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BAC_H1.pkl

[GARCH] BAC — horizon=3  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=3
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=971.679

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.029588
  alpha[1]: 0.421756
  beta[1]: 0.376107
  nu: 7.692989

[metrics]

1 day(s)
MAE       :    18.782275
RMSE      :    71.493605
R2        :   -83.822500
Pearson r :    -0.016550
QLIKE     :     0.585551

3 day(s)
MAE       :    18.238195
RMSE      :    59.861951
R2        :   -58.429355
Pearson r :    -0.021610
QLIKE     :     0.553894

full horizon
MAE       :    18.238195
RMSE      :    59.861951
R2        :   -58.429355
Pearson r :    -0.021610
QLIKE     :     0.553894
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BAC_H3.pkl

[GARCH] BAC — horizon=5  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=5
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=971.679

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.029588
  alpha[1]: 0.421756
  beta[1]: 0.376107
  nu: 7.692989

[metrics]

1 day(s)
MAE       :    18.782275
RMSE      :    71.493605
R2        :   -83.822500
Pearson r :    -0.016550
QLIKE     :     0.585551

3 day(s)
MAE       :    18.238195
RMSE      :    59.861951
R2        :   -58.429355
Pearson r :    -0.021610
QLIKE     :     0.553894

5 day(s)
MAE       :    17.798800
RMSE      :    51.761570
R2        :   -43.420143
Pearson r :    -0.021995
QLIKE     :     0.528120

full horizon
MAE       :    17.798800
RMSE      :    51.761570
R2        :   -43.420143
Pearson r :    -0.021995
QLIKE     :     0.528120
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BAC_H5.pkl

[GARCH] BAC — horizon=10  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2018, n_fit=1614, horizon=10
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=971.679

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.029588
  alpha[1]: 0.421756
  beta[1]: 0.376107
  nu: 7.692989

[metrics]

1 day(s)
MAE       :    18.782275
RMSE      :    71.493605
R2        :   -83.822500
Pearson r :    -0.016550
QLIKE     :     0.585551

3 day(s)
MAE       :    18.238195
RMSE      :    59.861951
R2        :   -58.429355
Pearson r :    -0.021610
QLIKE     :     0.553894

5 day(s)
MAE       :    17.798800
RMSE      :    51.761570
R2        :   -43.420143
Pearson r :    -0.021995
QLIKE     :     0.528120

10 day(s)
MAE       :    17.123131
RMSE      :    40.124812
R2        :   -25.690081
Pearson r :    -0.019958
QLIKE     :     0.496919

full horizon
MAE       :    17.123131
RMSE      :    40.124812
R2        :   -25.690081
Pearson r :    -0.019958
QLIKE     :     0.496919
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BAC_H10.pkl

[GARCH] BTCUSDT — horizon=20  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2399, n_fit=1919, horizon=20
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1478.049

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.059681
  alpha[1]: 0.956642
  beta[1]: 0.014088
  nu: 4.471211

[metrics]

1 day(s)
MAE       :   207.204437
RMSE      :   635.349959
R2        : -4782.854861
Pearson r :    -0.029780
QLIKE     :     0.488054

3 day(s)
MAE       :   285.222552
RMSE      :   652.221901
R2        : -5039.603758
Pearson r :    -0.000142
QLIKE     :     0.497030

5 day(s)
MAE       :   360.229661
RMSE      :   679.931411
R2        : -5478.717553
Pearson r :    -0.000500
QLIKE     :     0.513093

10 day(s)
MAE       :   535.437861
RMSE      :   779.241332
R2        : -7194.606480
Pearson r :    -0.001405
QLIKE     :     0.553222

20 day(s)
MAE       :   839.134871
RMSE      :  1022.317360
R2        : -12388.343878
Pearson r :    -0.001163
QLIKE     :     0.601529

full horizon
MAE       :   839.134871
RMSE      :  1022.317360
R2        : -12388.343878
Pearson r :    -0.001163
QLIKE     :     0.601529
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BTCUSDT_H20.pkl

[GARCH] BTCUSDT — horizon=1  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2399, n_fit=1919, horizon=1
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1478.049

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.059681
  alpha[1]: 0.956642
  beta[1]: 0.014088
  nu: 4.471211

[metrics]

1 day(s)
MAE       :   207.204437
RMSE      :   635.349959
R2        : -4782.854861
Pearson r :    -0.029780
QLIKE     :     0.488054

full horizon
MAE       :   207.204437
RMSE      :   635.349959
R2        : -4782.854861
Pearson r :    -0.029780
QLIKE     :     0.488054
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BTCUSDT_H1.pkl

[GARCH] BTCUSDT — horizon=3  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2399, n_fit=1919, horizon=3
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1478.049

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.059681
  alpha[1]: 0.956642
  beta[1]: 0.014088
  nu: 4.471211

[metrics]

1 day(s)
MAE       :   207.204437
RMSE      :   635.349959
R2        : -4782.854861
Pearson r :    -0.029780
QLIKE     :     0.488054

3 day(s)
MAE       :   285.222552
RMSE      :   652.221901
R2        : -5039.603758
Pearson r :    -0.000142
QLIKE     :     0.497030

full horizon
MAE       :   285.222552
RMSE      :   652.221901
R2        : -5039.603758
Pearson r :    -0.000142
QLIKE     :     0.497030
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BTCUSDT_H3.pkl

[GARCH] BTCUSDT — horizon=5  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2399, n_fit=1919, horizon=5
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1478.049

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.059681
  alpha[1]: 0.956642
  beta[1]: 0.014088
  nu: 4.471211

[metrics]

1 day(s)
MAE       :   207.204437
RMSE      :   635.349959
R2        : -4782.854861
Pearson r :    -0.029780
QLIKE     :     0.488054

3 day(s)
MAE       :   285.222552
RMSE      :   652.221901
R2        : -5039.603758
Pearson r :    -0.000142
QLIKE     :     0.497030

5 day(s)
MAE       :   360.229661
RMSE      :   679.931411
R2        : -5478.717553
Pearson r :    -0.000500
QLIKE     :     0.513093

full horizon
MAE       :   360.229661
RMSE      :   679.931411
R2        : -5478.717553
Pearson r :    -0.000500
QLIKE     :     0.513093
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BTCUSDT_H5.pkl

[GARCH] BTCUSDT — horizon=10  (anchors: step=55, feat=0; y_feat=0)
[eval] B=2399, n_fit=1919, horizon=10
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1478.049

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.059681
  alpha[1]: 0.956642
  beta[1]: 0.014088
  nu: 4.471211

[metrics]

1 day(s)
MAE       :   207.204437
RMSE      :   635.349959
R2        : -4782.854861
Pearson r :    -0.029780
QLIKE     :     0.488054

3 day(s)
MAE       :   285.222552
RMSE      :   652.221901
R2        : -5039.603758
Pearson r :    -0.000142
QLIKE     :     0.497030

5 day(s)
MAE       :   360.229661
RMSE      :   679.931411
R2        : -5478.717553
Pearson r :    -0.000500
QLIKE     :     0.513093

10 day(s)
MAE       :   535.437861
RMSE      :   779.241332
R2        : -7194.606480
Pearson r :    -0.001405
QLIKE     :     0.553222

full horizon
MAE       :   535.437861
RMSE      :   779.241332
R2        : -7194.606480
Pearson r :    -0.001405
QLIKE     :     0.553222
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_BTCUSDT_H10.pkl

[GARCH] EURUSD — horizon=20  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3782, n_fit=3025, horizon=20
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=5848.783

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.066681
  alpha[1]: 0.131620
  beta[1]: 0.724793
  nu: 4.538773

[metrics]

1 day(s)
MAE       :     0.161044
RMSE      :     0.257576
R2        :    -0.453556
Pearson r :     0.184611
QLIKE     :     0.580218

3 day(s)
MAE       :     0.159703
RMSE      :     0.256700
R2        :    -0.454827
Pearson r :     0.169722
QLIKE     :     0.578770

5 day(s)
MAE       :     0.158073
RMSE      :     0.255028
R2        :    -0.452251
Pearson r :     0.169864
QLIKE     :     0.575421

10 day(s)
MAE       :     0.157020
RMSE      :     0.254732
R2        :    -0.462719
Pearson r :     0.139592
QLIKE     :     0.575508

20 day(s)
MAE       :     0.156029
RMSE      :     0.254118
R2        :    -0.472847
Pearson r :     0.108493
QLIKE     :     0.570917

full horizon
MAE       :     0.156029
RMSE      :     0.254118
R2        :    -0.472847
Pearson r :     0.108493
QLIKE     :     0.570917
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_EURUSD_H20.pkl

[GARCH] EURUSD — horizon=1  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3782, n_fit=3025, horizon=1
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=5848.783

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.066681
  alpha[1]: 0.131620
  beta[1]: 0.724793
  nu: 4.538773

[metrics]

1 day(s)
MAE       :     0.161044
RMSE      :     0.257576
R2        :    -0.453556
Pearson r :     0.184611
QLIKE     :     0.580218

full horizon
MAE       :     0.161044
RMSE      :     0.257576
R2        :    -0.453556
Pearson r :     0.184611
QLIKE     :     0.580218
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_EURUSD_H1.pkl

[GARCH] EURUSD — horizon=3  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3782, n_fit=3025, horizon=3
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=5848.783

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.066681
  alpha[1]: 0.131620
  beta[1]: 0.724793
  nu: 4.538773

[metrics]

1 day(s)
MAE       :     0.161044
RMSE      :     0.257576
R2        :    -0.453556
Pearson r :     0.184611
QLIKE     :     0.580218

3 day(s)
MAE       :     0.159703
RMSE      :     0.256700
R2        :    -0.454827
Pearson r :     0.169722
QLIKE     :     0.578770

full horizon
MAE       :     0.159703
RMSE      :     0.256700
R2        :    -0.454827
Pearson r :     0.169722
QLIKE     :     0.578770
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_EURUSD_H3.pkl

[GARCH] EURUSD — horizon=5  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3782, n_fit=3025, horizon=5
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=5848.783

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.066681
  alpha[1]: 0.131620
  beta[1]: 0.724793
  nu: 4.538773

[metrics]

1 day(s)
MAE       :     0.161044
RMSE      :     0.257576
R2        :    -0.453556
Pearson r :     0.184611
QLIKE     :     0.580218

3 day(s)
MAE       :     0.159703
RMSE      :     0.256700
R2        :    -0.454827
Pearson r :     0.169722
QLIKE     :     0.578770

5 day(s)
MAE       :     0.158073
RMSE      :     0.255028
R2        :    -0.452251
Pearson r :     0.169864
QLIKE     :     0.575421

full horizon
MAE       :     0.158073
RMSE      :     0.255028
R2        :    -0.452251
Pearson r :     0.169864
QLIKE     :     0.575421
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_EURUSD_H5.pkl

[GARCH] EURUSD — horizon=10  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3782, n_fit=3025, horizon=10
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=5848.783

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.066681
  alpha[1]: 0.131620
  beta[1]: 0.724793
  nu: 4.538773

[metrics]

1 day(s)
MAE       :     0.161044
RMSE      :     0.257576
R2        :    -0.453556
Pearson r :     0.184611
QLIKE     :     0.580218

3 day(s)
MAE       :     0.159703
RMSE      :     0.256700
R2        :    -0.454827
Pearson r :     0.169722
QLIKE     :     0.578770

5 day(s)
MAE       :     0.158073
RMSE      :     0.255028
R2        :    -0.452251
Pearson r :     0.169864
QLIKE     :     0.575421

10 day(s)
MAE       :     0.157020
RMSE      :     0.254732
R2        :    -0.462719
Pearson r :     0.139592
QLIKE     :     0.575508

full horizon
MAE       :     0.157020
RMSE      :     0.254732
R2        :    -0.462719
Pearson r :     0.139592
QLIKE     :     0.575508
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_EURUSD_H10.pkl

[GARCH] GOLD — horizon=20  (anchors: step=55, feat=0; y_feat=0)
[eval] B=5534, n_fit=4427, horizon=20
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=6662.170

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.042837
  alpha[1]: 0.265957
  alpha[2]: 0.000000
  beta[1]: 0.157845
  beta[2]: 0.434977
  nu: 6.781479

[metrics]

1 day(s)
MAE       :     0.452322
RMSE      :     0.871243
R2        :    -0.767577
Pearson r :     0.180690
QLIKE     :     0.548914

3 day(s)
MAE       :     0.441101
RMSE      :     0.797136
R2        :    -0.478434
Pearson r :     0.188963
QLIKE     :     0.541996

5 day(s)
MAE       :     0.433882
RMSE      :     0.764818
R2        :    -0.357113
Pearson r :     0.190094
QLIKE     :     0.536240

10 day(s)
MAE       :     0.431766
RMSE      :     0.723196
R2        :    -0.199885
Pearson r :     0.187273
QLIKE     :     0.535947

20 day(s)
MAE       :     0.445144
RMSE      :     0.703653
R2        :    -0.119146
Pearson r :     0.149233
QLIKE     :     0.540202

full horizon
MAE       :     0.445144
RMSE      :     0.703653
R2        :    -0.119146
Pearson r :     0.149233
QLIKE     :     0.540202
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_GOLD_H20.pkl

[GARCH] GOLD — horizon=1  (anchors: step=55, feat=0; y_feat=0)
[eval] B=5534, n_fit=4427, horizon=1
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=6662.170

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.042837
  alpha[1]: 0.265957
  alpha[2]: 0.000000
  beta[1]: 0.157845
  beta[2]: 0.434977
  nu: 6.781479

[metrics]

1 day(s)
MAE       :     0.452322
RMSE      :     0.871243
R2        :    -0.767577
Pearson r :     0.180690
QLIKE     :     0.548914

full horizon
MAE       :     0.452322
RMSE      :     0.871243
R2        :    -0.767577
Pearson r :     0.180690
QLIKE     :     0.548914
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_GOLD_H1.pkl

[GARCH] GOLD — horizon=3  (anchors: step=55, feat=0; y_feat=0)
[eval] B=5534, n_fit=4427, horizon=3
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=6662.170

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.042837
  alpha[1]: 0.265957
  alpha[2]: 0.000000
  beta[1]: 0.157845
  beta[2]: 0.434977
  nu: 6.781479

[metrics]

1 day(s)
MAE       :     0.452322
RMSE      :     0.871243
R2        :    -0.767577
Pearson r :     0.180690
QLIKE     :     0.548914

3 day(s)
MAE       :     0.441101
RMSE      :     0.797136
R2        :    -0.478434
Pearson r :     0.188963
QLIKE     :     0.541996

full horizon
MAE       :     0.441101
RMSE      :     0.797136
R2        :    -0.478434
Pearson r :     0.188963
QLIKE     :     0.541996
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_GOLD_H3.pkl

[GARCH] GOLD — horizon=5  (anchors: step=55, feat=0; y_feat=0)
[eval] B=5534, n_fit=4427, horizon=5
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=6662.170

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.042837
  alpha[1]: 0.265957
  alpha[2]: 0.000000
  beta[1]: 0.157845
  beta[2]: 0.434977
  nu: 6.781479

[metrics]

1 day(s)
MAE       :     0.452322
RMSE      :     0.871243
R2        :    -0.767577
Pearson r :     0.180690
QLIKE     :     0.548914

3 day(s)
MAE       :     0.441101
RMSE      :     0.797136
R2        :    -0.478434
Pearson r :     0.188963
QLIKE     :     0.541996

5 day(s)
MAE       :     0.433882
RMSE      :     0.764818
R2        :    -0.357113
Pearson r :     0.190094
QLIKE     :     0.536240

full horizon
MAE       :     0.433882
RMSE      :     0.764818
R2        :    -0.357113
Pearson r :     0.190094
QLIKE     :     0.536240
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_GOLD_H5.pkl

[GARCH] GOLD — horizon=10  (anchors: step=55, feat=0; y_feat=0)
[eval] B=5534, n_fit=4427, horizon=10
[fit] Best by BIC: ('GARCH', 2, 2, 't'), BIC=6662.170

[best config] ('GARCH', 2, 2, 't')
[best params (train fit)]
  omega: 0.042837
  alpha[1]: 0.265957
  alpha[2]: 0.000000
  beta[1]: 0.157845
  beta[2]: 0.434977
  nu: 6.781479

[metrics]

1 day(s)
MAE       :     0.452322
RMSE      :     0.871243
R2        :    -0.767577
Pearson r :     0.180690
QLIKE     :     0.548914

3 day(s)
MAE       :     0.441101
RMSE      :     0.797136
R2        :    -0.478434
Pearson r :     0.188963
QLIKE     :     0.541996

5 day(s)
MAE       :     0.433882
RMSE      :     0.764818
R2        :    -0.357113
Pearson r :     0.190094
QLIKE     :     0.536240

10 day(s)
MAE       :     0.431766
RMSE      :     0.723196
R2        :    -0.199885
Pearson r :     0.187273
QLIKE     :     0.535947

full horizon
MAE       :     0.431766
RMSE      :     0.723196
R2        :    -0.199885
Pearson r :     0.187273
QLIKE     :     0.535947
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_GOLD_H10.pkl

[GARCH] SP500 — horizon=20  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3466, n_fit=2772, horizon=20
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1457.339

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.026327
  alpha[1]: 0.498665
  beta[1]: 0.342041
  nu: 7.394411

[metrics]

1 day(s)
MAE       :     6.334555
RMSE      :    67.344524
R2        :  -564.611368
Pearson r :     0.075889
QLIKE     :     1.078707

3 day(s)
MAE       :     5.636203
RMSE      :    57.755112
R2        :  -414.980930
Pearson r :     0.075584
QLIKE     :     1.065490

5 day(s)
MAE       :     5.090601
RMSE      :    50.512924
R2        :  -317.179877
Pearson r :     0.064283
QLIKE     :     1.074019

10 day(s)
MAE       :     4.106294
RMSE      :    38.795260
R2        :  -186.657621
Pearson r :     0.051327
QLIKE     :     1.074098

20 day(s)
MAE       :     3.102543
RMSE      :    27.942745
R2        :   -96.340929
Pearson r :     0.036919
QLIKE     :     1.055650

full horizon
MAE       :     3.102543
RMSE      :    27.942745
R2        :   -96.340929
Pearson r :     0.036919
QLIKE     :     1.055650
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_SP500_H20.pkl

[GARCH] SP500 — horizon=1  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3466, n_fit=2772, horizon=1
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1457.339

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.026327
  alpha[1]: 0.498665
  beta[1]: 0.342041
  nu: 7.394411

[metrics]

1 day(s)
MAE       :     6.334555
RMSE      :    67.344524
R2        :  -564.611368
Pearson r :     0.075889
QLIKE     :     1.078707

full horizon
MAE       :     6.334555
RMSE      :    67.344524
R2        :  -564.611368
Pearson r :     0.075889
QLIKE     :     1.078707
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_SP500_H1.pkl

[GARCH] SP500 — horizon=3  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3466, n_fit=2772, horizon=3
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1457.339

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.026327
  alpha[1]: 0.498665
  beta[1]: 0.342041
  nu: 7.394411

[metrics]

1 day(s)
MAE       :     6.334555
RMSE      :    67.344524
R2        :  -564.611368
Pearson r :     0.075889
QLIKE     :     1.078707

3 day(s)
MAE       :     5.636203
RMSE      :    57.755112
R2        :  -414.980930
Pearson r :     0.075584
QLIKE     :     1.065490

full horizon
MAE       :     5.636203
RMSE      :    57.755112
R2        :  -414.980930
Pearson r :     0.075584
QLIKE     :     1.065490
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_SP500_H3.pkl

[GARCH] SP500 — horizon=5  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3466, n_fit=2772, horizon=5
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1457.339

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.026327
  alpha[1]: 0.498665
  beta[1]: 0.342041
  nu: 7.394411

[metrics]

1 day(s)
MAE       :     6.334555
RMSE      :    67.344524
R2        :  -564.611368
Pearson r :     0.075889
QLIKE     :     1.078707

3 day(s)
MAE       :     5.636203
RMSE      :    57.755112
R2        :  -414.980930
Pearson r :     0.075584
QLIKE     :     1.065490

5 day(s)
MAE       :     5.090601
RMSE      :    50.512924
R2        :  -317.179877
Pearson r :     0.064283
QLIKE     :     1.074019

full horizon
MAE       :     5.090601
RMSE      :    50.512924
R2        :  -317.179877
Pearson r :     0.064283
QLIKE     :     1.074019
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_SP500_H5.pkl

[GARCH] SP500 — horizon=10  (anchors: step=55, feat=0; y_feat=0)
[eval] B=3466, n_fit=2772, horizon=10
[fit] Best by BIC: ('GARCH', 1, 1, 't'), BIC=1457.339

[best config] ('GARCH', 1, 1, 't')
[best params (train fit)]
  omega: 0.026327
  alpha[1]: 0.498665
  beta[1]: 0.342041
  nu: 7.394411

[metrics]

1 day(s)
MAE       :     6.334555
RMSE      :    67.344524
R2        :  -564.611368
Pearson r :     0.075889
QLIKE     :     1.078707

3 day(s)
MAE       :     5.636203
RMSE      :    57.755112
R2        :  -414.980930
Pearson r :     0.075584
QLIKE     :     1.065490

5 day(s)
MAE       :     5.090601
RMSE      :    50.512924
R2        :  -317.179877
Pearson r :     0.064283
QLIKE     :     1.074019

10 day(s)
MAE       :     4.106294
RMSE      :    38.795260
R2        :  -186.657621
Pearson r :     0.051327
QLIKE     :     1.074098

full horizon
MAE       :     4.106294
RMSE      :    38.795260
R2        :  -186.657621
Pearson r :     0.051327
QLIKE     :     1.074098
[GARCH] Saved: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/garch_models/GARCH_SP500_H10.pkl

Test call¶

In [135]:
import os, pickle, gzip, tempfile

# Assets / horizons / architectures evaluated in this smoke-test run.
# (alternate asset set kept for reference: ["GOLD", "BTCUSDT", "SP500"])
tickers  = ["EURUSD", "GOLD", "SP500"]
horizons = [1, 20]
model_list = [
    "Simple_MLP",
]

# Fixed hyper-parameters per architecture — no grid search in this test call.
FIXED_PARAMS = {
    "Simple_MLP": {
        "lr": 1e-3, "dropout": 0.0, "l2_weight": 1e-4, "batch_size": 64,
        "hidden_layers": 2, "hidden_dim": 64,
        "epochs": 50, "patience": 10, "min_epochs": 10, "min_delta": 1e-4,
        "target_mode": "log_mse",
    },
    "Simple_LSTM": {
        "lr": 5e-3, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 16,
        "hidden_layers": 4, "hidden_dim": 128,
        "epochs": 50, "patience": 5, "min_epochs": 20, "min_delta": 1e-4,
    },
    "Simple_KAN": {
        "lr": 5e-4, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 32,
        "hidden_layers": 3, "hidden_dim": 32, "knots": 8, "spline_power": 5,
        "epochs": 50, "patience": 5, "min_epochs": 20, "min_delta": 1e-4,
    },
    "LSTM_KAN": {
        "lr": 5e-4, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 16,
        "hidden_layers": 2, "hidden_dim": 128, "knots": 8, "spline_power": 5,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "TimesNet": {
        "lr": 5e-3, "l2_weight": 5e-5, "batch_size": 512,
        "d_model": 128, "d_ff": 256, "n_heads": 4, "e_layers": 4, "dropout": 0.0,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "ITransformer": {
        "lr": 5e-3, "l2_weight": 5e-5, "batch_size": 512,
        "d_model": 128, "d_ff": 256, "n_heads": 4, "e_layers": 4, "dropout": 0.0,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
}

# Options shared by every model run: single holdout split (no nested CV),
# inputs, time features and targets all normalized, verbose logging on.
COMMON = {
    "use_nested_cv": False,
    "single_holdout": True,
    "normalize_X": True,
    "normalize_Time": True,
    "normalize_y": True,
    "verbose": True,
}


# (disabled) example hyper-parameter grid; pass it as `param_grids=` in the
# run_all_models_for_all call below to re-enable grid search for Simple_MLP.
# test_param_grids = {
#     "Simple_MLP": {
#         "lr": [5e-3],
#         "batch_size": [64, 512],
#         "hidden_layers": [2, 4],
#         "hidden_dim": [64, 512],
#         "dropout": [0.0],
#         "l2_weight": [1e-5],
#     }
# }



# Output directory for this test run. `root_folder` and `objects_relative_path`
# are defined in earlier notebook cells — TODO confirm the cell defining
# `objects_relative_path` has been executed before this one.
BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "runs_timeseries_test")
os.makedirs(BASE_SAVE_DIR, exist_ok=True)


# Sanity check on the input tensor: prints (samples, window, features).
print(X_price_map["EURUSD"].shape)

# Train/evaluate every model in `model_list` for every (ticker, horizon) pair
# using the fixed hyper-parameters above; saves fitted models under
# BASE_SAVE_DIR and returns a nested {ticker: {horizon: {model: bundle}}} store.
results_store_test = run_all_models_for_all(
    tickers=tickers,
    horizons=horizons,
    model_list=model_list,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=None, #test_param_grids,
    grid_tickers=None,
    grid_horizons=None,
    results_store=None,
    overwrite=True
)


# GARCH baseline over the prepared data dict: a single GARCH(1,1)-t candidate,
# zero-mean returns, rolling window of 250, no simulated paths.
# NOTE(review): this fills `all_results_test` with GARCH output only — the
# neural-model results live in `results_store_test` from the previous call;
# verify the downstream save/inspection cells use the intended store.
all_results_test = run_garch_over_dict(
    structured_data_dict=structured_data_dict_8,
    tickers=["EURUSD","GOLD"],
    horizons=[1,5,20],
    results_dict={},
    anchor_step=20,
    anchor_feature_idx=0,
    y_feature_idx=0,
    split_ratio=0.8,
    roll_window=250,
    mean_mode="Zero",
    candidates=[("GARCH", 1, 1, "t")],
    sim_paths=0,
    metric_horizons=(1,5,20,-1),
    verbose=True,
    save_dir=None
)



# Spot-check one (ticker, horizon, model) bundle and print its metric keys.
# NOTE(review): "Simple_MLP" bundles are produced by run_all_models_for_all
# into `results_store_test`, while `all_results_test` holds only GARCH output
# — so this guard likely never matches and nothing is printed. Confirm whether
# `results_store_test` was the intended store here.
ex_ticker, ex_H, ex_model = "GOLD",20, "Simple_MLP"
if ex_ticker in all_results_test and ex_H in all_results_test[ex_ticker] and ex_model in all_results_test[ex_ticker][ex_H]:
    bundle = all_results_test[ex_ticker][ex_H][ex_model]
    print(f"- {ex_ticker} H={ex_H} {ex_model} metrics keys:",
          list(bundle["results"].keys())[:5], "...")
    print(f"- saved model path: {bundle.get('save_path')}")



# Destination for the slimmed (model-free) results pickle; ensure the parent
# directory exists. `objects_relative_path` comes from an earlier cell.
SAVE_PATH = os.path.join(root_folder, objects_relative_path, "all_results_test.pkl.gz")
os.makedirs(os.path.dirname(SAVE_PATH), exist_ok=True)

def strip_models(d):
    """Return a copy of a nested {ticker: {horizon: {model_name: payload}}}
    results dict with the heavyweight "model" entry dropped from every payload.

    Payloads are shallow-copied, so `d` and its payload dicts are left
    untouched; all other payload keys (metrics, save paths, ...) are kept.
    """
    return {
        tkr: {
            H: {
                mname: {k: v for k, v in payload.items() if k != "model"}
                for mname, payload in byM.items()
            }
            for H, byM in byH.items()
        }
        for tkr, byH in d.items()
    }

# Drop the fitted model objects before pickling (they are saved separately
# by the run itself); only metrics and metadata go into the archive.
to_save_test = strip_models(all_results_test)

# Atomic write: serialize into a temp file in the destination directory, then
# os.replace() it over SAVE_PATH so a crash can never leave a half-written
# archive at the final path. Fix vs. original: on a serialization failure the
# orphaned temp file is now removed instead of being leaked on disk.
tmp_name = None
try:
    with tempfile.NamedTemporaryFile(dir=os.path.dirname(SAVE_PATH), delete=False) as tmp:
        with gzip.GzipFile(fileobj=tmp, mode="wb") as f:
            pickle.dump(to_save_test, f, protocol=pickle.HIGHEST_PROTOCOL)
        tmp_name = tmp.name
    os.replace(tmp_name, SAVE_PATH)  # atomic on POSIX when same filesystem
except BaseException:
    # Best-effort cleanup of the temp file, then propagate the error.
    if tmp_name is not None and os.path.exists(tmp_name):
        os.unlink(tmp_name)
    raise

print(f"Saved merged results to: {SAVE_PATH}")
(3782, 60, 1)

=== EURUSD | H=1 | Simple_MLP (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9963965101732293
  Min value:  -3.718602223017603
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642504
  Min value:  -2.975928211620611
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316056
  Min value:  -5.349969967608034
Epoch 1: Train mse = 0.6608 | Val mse = 0.3995
Epoch 2: Train mse = 0.4878 | Val mse = 0.3860
Epoch 3: Train mse = 0.3254 | Val mse = 0.3415
Epoch 4: Train mse = 0.3088 | Val mse = 0.3321
Epoch 5: Train mse = 0.2757 | Val mse = 0.3277
Epoch 6: Train mse = 0.2905 | Val mse = 0.3525
Epoch 7: Train mse = 0.2569 | Val mse = 0.3329
Epoch 8: Train mse = 0.2488 | Val mse = 0.3487
Epoch 9: Train mse = 0.2260 | Val mse = 0.3284
Epoch 10: Train mse = 0.2067 | Val mse = 0.3476
Epoch 11: Train mse = 0.1894 | Val mse = 0.3323
Epoch 12: Train mse = 0.1771 | Val mse = 0.3377
Epoch 13: Train mse = 0.1674 | Val mse = 0.3442
Epoch 14: Train mse = 0.1617 | Val mse = 0.3686
Epoch 15: Train mse = 0.1514 | Val mse = 0.4189
Early stopping triggered at epoch 15.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 10
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08919762
1 day(s) RMSE                      : 0.17621370
1 day(s) R2                        : 0.31969810
1 day(s) Pearson r                 : 0.58196664
1 day(s) QLIKE                     : 0.41610350
full horizon MAE                   : 0.08919762
full horizon RMSE                  : 0.17621370
full horizon R2                    : 0.31969810
full horizon Pearson r             : 0.58196664
full horizon QLIKE                 : 0.41610350

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/runs_timeseries_test/EURUSD/Simple_MLP_H1.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00600063, max=0.943153

=== EURUSD | H=20 | Simple_MLP (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755
Epoch 1: Train mse = 0.8544 | Val mse = 0.7737
Epoch 2: Train mse = 0.5614 | Val mse = 0.5585
Epoch 3: Train mse = 0.4420 | Val mse = 0.4830
Epoch 4: Train mse = 0.3990 | Val mse = 0.4512
Epoch 5: Train mse = 0.3803 | Val mse = 0.4250
Epoch 6: Train mse = 0.3656 | Val mse = 0.4103
Epoch 7: Train mse = 0.3517 | Val mse = 0.3997
Epoch 8: Train mse = 0.3406 | Val mse = 0.3939
Epoch 9: Train mse = 0.3318 | Val mse = 0.3903
Epoch 10: Train mse = 0.3240 | Val mse = 0.3884
Epoch 11: Train mse = 0.3172 | Val mse = 0.3873
Epoch 12: Train mse = 0.3113 | Val mse = 0.3868
Epoch 13: Train mse = 0.3060 | Val mse = 0.3868
Epoch 14: Train mse = 0.3012 | Val mse = 0.3873
Epoch 15: Train mse = 0.2970 | Val mse = 0.3882
Epoch 16: Train mse = 0.2931 | Val mse = 0.3893
Epoch 17: Train mse = 0.2896 | Val mse = 0.3908
Epoch 18: Train mse = 0.2863 | Val mse = 0.3923
Epoch 19: Train mse = 0.2837 | Val mse = 0.3946
Epoch 20: Train mse = 0.2805 | Val mse = 0.3961
Epoch 21: Train mse = 0.2807 | Val mse = 0.4004
Epoch 22: Train mse = 0.2789 | Val mse = 0.4010
Early stopping triggered at epoch 22.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 10
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08452395
1 day(s) RMSE                      : 0.17544610
1 day(s) R2                        : 0.32561206
1 day(s) Pearson r                 : 0.60415090
1 day(s) QLIKE                     : 0.37362880
3 day(s) MAE                       : 0.08768875
3 day(s) RMSE                      : 0.17907247
3 day(s) R2                        : 0.29202553
3 day(s) Pearson r                 : 0.56785289
3 day(s) QLIKE                     : 0.41967778
5 day(s) MAE                       : 0.08924005
5 day(s) RMSE                      : 0.18413082
5 day(s) R2                        : 0.24295755
5 day(s) Pearson r                 : 0.52254482
5 day(s) QLIKE                     : 0.42620857
10 day(s) MAE                      : 0.09066161
10 day(s) RMSE                     : 0.18931449
10 day(s) R2                       : 0.19209390
10 day(s) Pearson r                : 0.47360505
10 day(s) QLIKE                    : 0.44704286
20 day(s) MAE                      : 0.09247805
20 day(s) RMSE                     : 0.19310444
20 day(s) R2                       : 0.14950877
20 day(s) Pearson r                : 0.43119811
20 day(s) QLIKE                    : 0.47442451
full horizon MAE                   : 0.09247805
full horizon RMSE                  : 0.19310444
full horizon R2                    : 0.14950877
full horizon Pearson r             : 0.43119811
full horizon QLIKE                 : 0.47442451

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/runs_timeseries_test/EURUSD/Simple_MLP_H20.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00463019, max=0.80387

=== GOLD | H=1 | Simple_MLP (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.28323490000094
  Min value:  -5.250298730157779
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7902797110229713
  Min value:  -2.119473437475616
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5405778347636028
  Min value:  -2.3606736150182797
Epoch 1: Train mse = 0.7753 | Val mse = 0.3612
Epoch 2: Train mse = 0.4954 | Val mse = 0.3113
Epoch 3: Train mse = 0.3452 | Val mse = 0.2997
Epoch 4: Train mse = 0.3138 | Val mse = 0.2999
Epoch 5: Train mse = 0.2968 | Val mse = 0.3057
Epoch 6: Train mse = 0.2620 | Val mse = 0.3054
Epoch 7: Train mse = 0.2454 | Val mse = 0.3038
Epoch 8: Train mse = 0.2316 | Val mse = 0.3043
Epoch 9: Train mse = 0.2144 | Val mse = 0.3039
Epoch 10: Train mse = 0.1991 | Val mse = 0.3069
Epoch 11: Train mse = 0.1886 | Val mse = 0.3074
Epoch 12: Train mse = 0.1728 | Val mse = 0.3034
Epoch 13: Train mse = 0.1679 | Val mse = 0.3063
Early stopping triggered at epoch 13.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 10
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.32007091
1 day(s) RMSE                      : 0.56865279
1 day(s) R2                        : 0.24700192
1 day(s) Pearson r                 : 0.51610008
1 day(s) QLIKE                     : 0.57212185
full horizon MAE                   : 0.32007091
full horizon RMSE                  : 0.56865279
full horizon R2                    : 0.24700192
full horizon Pearson r             : 0.51610008
full horizon QLIKE                 : 0.57212185

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/runs_timeseries_test/GOLD/Simple_MLP_H1.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0063539, max=2.17653

=== GOLD | H=20 | Simple_MLP (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2870228653696634
  Min value:  -5.257736685381005
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.793333110638287
  Min value:  -2.1222460659281195
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5432591469134391
  Min value:  -2.3638056623268993
Epoch 1: Train mse = 0.9324 | Val mse = 0.4719
Epoch 2: Train mse = 0.7355 | Val mse = 0.4365
Epoch 3: Train mse = 0.6123 | Val mse = 0.4017
Epoch 4: Train mse = 0.5212 | Val mse = 0.4023
Epoch 5: Train mse = 0.4588 | Val mse = 0.3947
Epoch 6: Train mse = 0.4225 | Val mse = 0.3857
Epoch 7: Train mse = 0.3955 | Val mse = 0.3774
Epoch 8: Train mse = 0.3748 | Val mse = 0.3702
Epoch 9: Train mse = 0.3596 | Val mse = 0.3621
Epoch 10: Train mse = 0.3481 | Val mse = 0.3546
Epoch 11: Train mse = 0.3377 | Val mse = 0.3478
Epoch 12: Train mse = 0.3279 | Val mse = 0.3429
Epoch 13: Train mse = 0.3195 | Val mse = 0.3398
Epoch 14: Train mse = 0.3130 | Val mse = 0.3379
Epoch 15: Train mse = 0.3097 | Val mse = 0.3374
Epoch 16: Train mse = 0.3118 | Val mse = 0.3382
Epoch 17: Train mse = 0.3121 | Val mse = 0.3339
Epoch 18: Train mse = 0.3010 | Val mse = 0.3260
Epoch 19: Train mse = 0.2863 | Val mse = 0.3208
Epoch 20: Train mse = 0.2768 | Val mse = 0.3168
Epoch 21: Train mse = 0.2737 | Val mse = 0.3142
Epoch 22: Train mse = 0.2794 | Val mse = 0.3142
Epoch 23: Train mse = 0.2816 | Val mse = 0.3143
Epoch 24: Train mse = 0.2785 | Val mse = 0.3178
Epoch 25: Train mse = 0.2766 | Val mse = 0.3228
Epoch 26: Train mse = 0.2768 | Val mse = 0.3258
Epoch 27: Train mse = 0.2798 | Val mse = 0.3276
Epoch 28: Train mse = 0.2740 | Val mse = 0.3172
Epoch 29: Train mse = 0.2615 | Val mse = 0.3152
Epoch 30: Train mse = 0.2590 | Val mse = 0.3141
Epoch 31: Train mse = 0.2529 | Val mse = 0.3084
Epoch 32: Train mse = 0.2418 | Val mse = 0.3083
Epoch 33: Train mse = 0.2375 | Val mse = 0.3087
Epoch 34: Train mse = 0.2378 | Val mse = 0.3106
Epoch 35: Train mse = 0.2464 | Val mse = 0.3147
Epoch 36: Train mse = 0.2494 | Val mse = 0.3108
Epoch 37: Train mse = 0.2409 | Val mse = 0.3149
Epoch 38: Train mse = 0.2433 | Val mse = 0.3209
Epoch 39: Train mse = 0.2540 | Val mse = 0.3230
Epoch 40: Train mse = 0.2555 | Val mse = 0.3234
Epoch 41: Train mse = 0.2498 | Val mse = 0.3214
Epoch 42: Train mse = 0.2428 | Val mse = 0.3199
Early stopping triggered at epoch 42.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 10
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.29238660
1 day(s) RMSE                      : 0.55577848
1 day(s) R2                        : 0.28071174
1 day(s) Pearson r                 : 0.57670710
1 day(s) QLIKE                     : 0.47645527
3 day(s) MAE                       : 0.30724076
3 day(s) RMSE                      : 0.58533270
3 day(s) R2                        : 0.20284588
3 day(s) Pearson r                 : 0.52899745
3 day(s) QLIKE                     : 0.50072996
5 day(s) MAE                       : 0.31478991
5 day(s) RMSE                      : 0.60132274
5 day(s) R2                        : 0.16109141
5 day(s) Pearson r                 : 0.49529540
5 day(s) QLIKE                     : 0.51605551
10 day(s) MAE                      : 0.32736396
10 day(s) RMSE                     : 0.62665027
10 day(s) R2                       : 0.09909648
10 day(s) Pearson r                : 0.44347614
10 day(s) QLIKE                    : 0.54242719
20 day(s) MAE                      : 0.34726557
20 day(s) RMSE                     : 0.65913494
20 day(s) R2                       : 0.01798373
20 day(s) Pearson r                : 0.37493238
20 day(s) QLIKE                    : 0.58685642
full horizon MAE                   : 0.34726557
full horizon RMSE                  : 0.65913494
full horizon R2                    : 0.01798373
full horizon Pearson r             : 0.37493238
full horizon QLIKE                 : 0.58685642

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/runs_timeseries_test/GOLD/Simple_MLP_H20.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00879674, max=5.31712

=== SP500 | H=1 | Simple_MLP (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.598103080699805
  Min value:  -18.186962127218408
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3850009093345803
  Min value:  -2.4181466917899535
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3795163284544953
  Min value:  -3.2441546953028633
Epoch 1: Train mse = 0.8891 | Val mse = 0.7812
Epoch 2: Train mse = 0.7106 | Val mse = 0.6560
Epoch 3: Train mse = 0.6653 | Val mse = 0.5375
Epoch 4: Train mse = 0.5831 | Val mse = 0.5014
Epoch 5: Train mse = 0.5618 | Val mse = 0.5017
Epoch 6: Train mse = 0.5297 | Val mse = 0.5077
Epoch 7: Train mse = 0.4951 | Val mse = 0.5260
Epoch 8: Train mse = 0.4742 | Val mse = 0.5107
Epoch 9: Train mse = 0.4468 | Val mse = 0.5205
Epoch 10: Train mse = 0.4446 | Val mse = 0.4900
Epoch 11: Train mse = 0.4120 | Val mse = 0.5517
Epoch 12: Train mse = 0.4325 | Val mse = 0.4902
Epoch 13: Train mse = 0.3870 | Val mse = 0.5490
Epoch 14: Train mse = 0.4261 | Val mse = 0.5298
Epoch 15: Train mse = 0.3708 | Val mse = 0.5616
Epoch 16: Train mse = 0.3896 | Val mse = 0.6111
Epoch 17: Train mse = 0.3445 | Val mse = 0.5591
Epoch 18: Train mse = 0.3347 | Val mse = 0.5615
Epoch 19: Train mse = 0.3224 | Val mse = 0.5864
Epoch 20: Train mse = 0.3223 | Val mse = 0.5622
Early stopping triggered at epoch 20.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 10
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.55122681
1 day(s) RMSE                      : 2.30113331
1 day(s) R2                        : 0.33961513
1 day(s) Pearson r                 : 0.60973808
1 day(s) QLIKE                     : 0.54568460
full horizon MAE                   : 0.55122681
full horizon RMSE                  : 2.30113331
full horizon R2                    : 0.33961513
full horizon Pearson r             : 0.60973808
full horizon QLIKE                 : 0.54568460

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/runs_timeseries_test/SP500/Simple_MLP_H1.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.00721551, max=12.551

=== SP500 | H=20 | Simple_MLP (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5963112311058607
  Min value:  -18.187442810157716
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3832820721048678
  Min value:  -2.446834413184202
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.377737634830384
  Min value:  -3.2455347334033506
Epoch 1: Train mse = 0.9955 | Val mse = 0.9399
Epoch 2: Train mse = 0.8933 | Val mse = 0.8310
Epoch 3: Train mse = 0.8626 | Val mse = 0.7989
Epoch 4: Train mse = 0.8334 | Val mse = 0.7690
Epoch 5: Train mse = 0.8152 | Val mse = 0.7538
Epoch 6: Train mse = 0.7933 | Val mse = 0.7265
Epoch 7: Train mse = 0.7745 | Val mse = 0.7115
Epoch 8: Train mse = 0.7622 | Val mse = 0.6946
Epoch 9: Train mse = 0.7523 | Val mse = 0.6820
Epoch 10: Train mse = 0.7447 | Val mse = 0.6646
Epoch 11: Train mse = 0.7356 | Val mse = 0.6545
Epoch 12: Train mse = 0.7188 | Val mse = 0.6498
Epoch 13: Train mse = 0.7116 | Val mse = 0.6617
Epoch 14: Train mse = 0.6998 | Val mse = 0.6395
Epoch 15: Train mse = 0.6941 | Val mse = 0.6405
Epoch 16: Train mse = 0.6888 | Val mse = 0.6261
Epoch 17: Train mse = 0.6771 | Val mse = 0.6283
Epoch 18: Train mse = 0.6704 | Val mse = 0.6283
Epoch 19: Train mse = 0.6643 | Val mse = 0.6302
Epoch 20: Train mse = 0.6549 | Val mse = 0.6262
Epoch 21: Train mse = 0.6513 | Val mse = 0.6340
Epoch 22: Train mse = 0.6407 | Val mse = 0.6303
Epoch 23: Train mse = 0.6361 | Val mse = 0.6316
Epoch 24: Train mse = 0.6252 | Val mse = 0.6351
Epoch 25: Train mse = 0.6220 | Val mse = 0.6315
Epoch 26: Train mse = 0.6080 | Val mse = 0.6322
Early stopping triggered at epoch 26.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
dropout: 0.00000000
lr: 0.00100000
epochs: 50
batch_size: 64
device: mps
verbose: True
hidden_layers: 2
no_tasks: 1
l2_weight: 0.00010000
patience: 10
min_epochs: 10
min_delta: 0.00010000
hidden_dim: 64
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.55215352
1 day(s) RMSE                      : 2.48234127
1 day(s) R2                        : 0.23151299
1 day(s) Pearson r                 : 0.53407678
1 day(s) QLIKE                     : 0.47378945
3 day(s) MAE                       : 0.57915858
3 day(s) RMSE                      : 2.55154316
3 day(s) R2                        : 0.18810775
3 day(s) Pearson r                 : 0.47458197
3 day(s) QLIKE                     : 0.54040077
5 day(s) MAE                       : 0.60104043
5 day(s) RMSE                      : 2.63407078
5 day(s) R2                        : 0.13478794
5 day(s) Pearson r                 : 0.39365122
5 day(s) QLIKE                     : 0.62935035
10 day(s) MAE                      : 0.62264108
10 day(s) RMSE                     : 2.73910211
10 day(s) R2                       : 0.06453979
10 day(s) Pearson r                : 0.27627694
10 day(s) QLIKE                    : 0.77801216
20 day(s) MAE                      : 0.64834771
20 day(s) RMSE                     : 2.79859961
20 day(s) R2                       : 0.02357500
20 day(s) Pearson r                : 0.19407947
20 day(s) QLIKE                    : 0.85499450
full horizon MAE                   : 0.64834771
full horizon RMSE                  : 2.79859961
full horizon R2                    : 0.02357500
full horizon Pearson r             : 0.19407947
full horizon QLIKE                 : 0.85499450

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/runs_timeseries_test/SP500/Simple_MLP_H20.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0238434, max=7.35137

[GARCH] EURUSD — H=1 (step=20, x_feat=0, y_feat=0)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=1, roll_window=250
  anchor 3024/3781: sd=0.301101, h1_pred=0.205496
  anchor 3175/3781: sd=0.328803, h1_pred=0.0848216
  anchor 3326/3781: sd=0.149937, h1_pred=0.0124112
  anchor 3477/3781: sd=0.10242, h1_pred=0.0106819
  anchor 3628/3781: sd=0.0917164, h1_pred=0.00642232
  anchor 3779/3781: sd=0.197916, h1_pred=0.0239315

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.161562
RMSE      : 0.258397
R2        : -0.459119
Pearson r : 0.216443
QLIKE     : 0.793928

full horizon
MAE       : 0.161562
RMSE      : 0.258397
R2        : -0.459119
Pearson r : 0.216443
QLIKE     : 0.793928

[GARCH] EURUSD — H=5 (step=20, x_feat=0, y_feat=0)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=5, roll_window=250
  anchor 3024/3781: sd=0.301101, h1_pred=0.205496
  anchor 3175/3781: sd=0.328803, h1_pred=0.0848216
  anchor 3326/3781: sd=0.149937, h1_pred=0.0124112
  anchor 3477/3781: sd=0.10242, h1_pred=0.0106819
  anchor 3628/3781: sd=0.0917164, h1_pred=0.00642232
  anchor 3779/3781: sd=0.197916, h1_pred=0.0239315

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.161562
RMSE      : 0.258397
R2        : -0.459119
Pearson r : 0.216443
QLIKE     : 0.793928

5 day(s)
MAE       : 0.160722
RMSE      : 0.256832
R2        : -0.462196
Pearson r : 0.208236
QLIKE     : 0.795203

full horizon
MAE       : 0.160722
RMSE      : 0.256832
R2        : -0.462196
Pearson r : 0.208236
QLIKE     : 0.795203

[GARCH] EURUSD — H=20 (step=20, x_feat=0, y_feat=0)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=20, roll_window=250
  anchor 3024/3781: sd=0.301101, h1_pred=0.205496
  anchor 3175/3781: sd=0.328803, h1_pred=0.0848216
  anchor 3326/3781: sd=0.149937, h1_pred=0.0124112
  anchor 3477/3781: sd=0.10242, h1_pred=0.0106819
  anchor 3628/3781: sd=0.0917164, h1_pred=0.00642232
  anchor 3779/3781: sd=0.197916, h1_pred=0.0239315

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.161562
RMSE      : 0.258397
R2        : -0.459119
Pearson r : 0.216443
QLIKE     : 0.793928

5 day(s)
MAE       : 0.160722
RMSE      : 0.256832
R2        : -0.462196
Pearson r : 0.208236
QLIKE     : 0.795203

20 day(s)
MAE       : 0.159353
RMSE      : 0.253765
R2        : -0.464724
Pearson r : 0.219113
QLIKE     : 0.797077

full horizon
MAE       : 0.159353
RMSE      : 0.253765
R2        : -0.464724
Pearson r : 0.219113
QLIKE     : 0.797077

[GARCH] GOLD — H=1 (step=20, x_feat=0, y_feat=0)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=1, roll_window=250
  anchor 4426/5533: sd=0.927787, h1_pred=0.473358
  anchor 4647/5533: sd=1.03081, h1_pred=0.318591
  anchor 4868/5533: sd=0.461766, h1_pred=0.175207
  anchor 5089/5533: sd=0.431765, h1_pred=0.149619
  anchor 5310/5533: sd=0.536677, h1_pred=0.178005
  anchor 5531/5533: sd=0.450478, h1_pred=0.203049

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.507631
RMSE      : 0.900324
R2        : -0.889048
Pearson r : 0.012688
QLIKE     : 0.650927

full horizon
MAE       : 0.507631
RMSE      : 0.900324
R2        : -0.889048
Pearson r : 0.012688
QLIKE     : 0.650927

[GARCH] GOLD — H=5 (step=20, x_feat=0, y_feat=0)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=5, roll_window=250
  anchor 4426/5533: sd=0.927787, h1_pred=0.473358
  anchor 4647/5533: sd=1.03081, h1_pred=0.318591
  anchor 4868/5533: sd=0.461766, h1_pred=0.175207
  anchor 5089/5533: sd=0.431765, h1_pred=0.149619
  anchor 5310/5533: sd=0.536677, h1_pred=0.178005
  anchor 5531/5533: sd=0.450478, h1_pred=0.203049

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.507631
RMSE      : 0.900324
R2        : -0.889048
Pearson r : 0.012688
QLIKE     : 0.650927

5 day(s)
MAE       : 0.485309
RMSE      : 0.807028
R2        : -0.512028
Pearson r : -0.008773
QLIKE     : 0.642400

full horizon
MAE       : 0.485309
RMSE      : 0.807028
R2        : -0.512028
Pearson r : -0.008773
QLIKE     : 0.642400

[GARCH] GOLD — H=20 (step=20, x_feat=0, y_feat=0)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=20, roll_window=250
  anchor 4426/5533: sd=0.927787, h1_pred=0.473358
  anchor 4647/5533: sd=1.03081, h1_pred=0.318591
  anchor 4868/5533: sd=0.461766, h1_pred=0.175207
  anchor 5089/5533: sd=0.431765, h1_pred=0.149619
  anchor 5310/5533: sd=0.536677, h1_pred=0.178005
  anchor 5531/5533: sd=0.450478, h1_pred=0.203049

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.507631
RMSE      : 0.900324
R2        : -0.889048
Pearson r : 0.012688
QLIKE     : 0.650927

5 day(s)
MAE       : 0.485309
RMSE      : 0.807028
R2        : -0.512028
Pearson r : -0.008773
QLIKE     : 0.642400

20 day(s)
MAE       : 0.486546
RMSE      : 0.796783
R2        : -0.435984
Pearson r : -0.038474
QLIKE     : 0.640279

full horizon
MAE       : 0.486546
RMSE      : 0.796783
R2        : -0.435984
Pearson r : -0.038474
QLIKE     : 0.640279
Saved merged results to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/all_results_test.pkl.gz

Printing a data example¶

In [720]:
# Inspect one stored result bundle: pick a (ticker, horizon, model) key and
# report the shape of its saved y_data array and the largest value in its
# second channel.
ex_ticker, ex_H, ex_model = "EURUSD", 20, "Simple_MLP"
extras = all_results_test[ex_ticker][ex_H][ex_model]

y_data = extras["y_data"]
print(y_data.shape)

# Channel index 1 appears to hold y_pred (its nanmax matches the saved
# y_pred max printed during training) — confirm against the saving routine.
print(np.nanmax(y_data[:, :, 1]))
(757, 20, 2)
0.8038699626922607

Load saved dictionary¶

In [721]:
# Reload the previously saved results dictionary from its gzip-compressed pickle.
import gzip
import pickle

SAVE_PATH = os.path.join(root_folder, objects_relative_path, "all_results.pkl.gz")

# NOTE(review): pickle.load can execute arbitrary code on untrusted files —
# acceptable here only because this archive was produced by this project.
with gzip.open(SAVE_PATH, "rb") as handle:
    all_results = pickle.load(handle)

print("Loaded. Example tickers:", list(all_results.keys())[:5])
Loaded. Example tickers: ['AAPL', 'MSFT', 'GOLD', 'BTCUSDT', 'SP500']

Routine to print results¶

In [62]:
import numpy as np
import pandas as pd

def build_metric_frames(
    results_store,
    task_name=None,
    outer_horizon="full",
    metrics_order=None,
    round_decimals=6,
    pretty_print=True,
):
    """Build per-(ticker, model) DataFrames of "full horizon" metrics.

    Parameters
    ----------
    results_store : {ticker: {horizon: {model: bundle}}} where bundle["results"]
        is either {task_name: metrics_dict} or a metrics_dict directly; metric
        keys of interest start with "full horizon ".
    task_name : preferred task key inside the results blob (falls back to
        "Task 1", then to the first key in sorted order).
    outer_horizon : "full" to use every horizon the model appears at, or a
        specific horizon key to restrict to that one.
    metrics_order : preferred row ordering; unknown metrics are appended sorted.
    round_decimals : rounding used only for pretty printing.
    pretty_print : print each frame as it is built.

    Returns
    -------
    {ticker: {model: DataFrame}} with columns "H<h>" and metric-name rows.
    """
    frames = {}

    default_metrics_order = [
        "MAE (log-var)", "RMSE (log-var)", "R2 (log-var)",
        "MAE (var)", "RMSE (var)", "R2 (var)", "QLIKE (var)"
    ]
    if metrics_order is None:
        metrics_order = default_metrics_order

    def _pick_task(results_blob):
        # A non-empty dict-of-dicts is treated as {task: metrics}; anything
        # else (including an empty dict) is assumed to be the metrics itself.
        # BUG FIX: the empty-dict case previously fell into sorted(...)[0]
        # and raised IndexError.
        if isinstance(results_blob, dict) and results_blob \
                and all(isinstance(v, dict) for v in results_blob.values()):
            if task_name and task_name in results_blob:
                return task_name, results_blob[task_name]
            if "Task 1" in results_blob:
                return "Task 1", results_blob["Task 1"]
            k0 = sorted(results_blob.keys())[0]
            return k0, results_blob[k0]
        return task_name or "Task 1", results_blob

    def _extract_full_metrics(metrics_dict):
        # Keep only "full horizon <measure>" entries, keyed by <measure>.
        out = {}
        for k, v in metrics_dict.items():
            if isinstance(k, str) and k.startswith("full horizon "):
                measure = k[len("full horizon "):].strip()
                out[measure] = float(v)
        return out

    for ticker, hdict in results_store.items():
        frames[ticker] = {}

        # Union of model names across all horizons for this ticker.
        model_names = set()
        for h in hdict:
            model_names.update(hdict[h].keys())

        for model in sorted(model_names):
            if outer_horizon == "full":
                outer_cols = sorted([h for h in hdict.keys() if model in hdict[h]])
            else:
                if outer_horizon in hdict and model in hdict[outer_horizon]:
                    outer_cols = [outer_horizon]
                else:
                    continue
            if not outer_cols:
                # Defensive: nothing to build a frame from.
                continue

            all_measures = set()
            per_col_measures = {}
            task_label = task_name or "Task 1"  # label actually used (last column wins)

            for h in outer_cols:
                bundle = hdict[h][model]
                results_blob = bundle.get("results", {})
                task_label, metrics_for_task = _pick_task(results_blob)

                full_only = _extract_full_metrics(metrics_for_task)
                per_col_measures[h] = full_only
                all_measures.update(full_only.keys())

            # Preferred ordering first, then any remaining measures sorted.
            row_order = [m for m in metrics_order if m in all_measures] + \
                        [m for m in sorted(all_measures) if m not in metrics_order]

            data = {
                f"H{h}": [per_col_measures.get(h, {}).get(m, np.nan) for m in row_order]
                for h in outer_cols
            }
            df = pd.DataFrame(data, index=row_order)
            frames[ticker][model] = df

            if pretty_print:
                col_note = (f"all outer horizons {outer_cols}" if outer_horizon == "full"
                            else f"H={outer_horizon}")
                # BUG FIX: previously re-used the leftover loop variable
                # `results_blob` here; use the tracked task label instead.
                print(f"\n=== {ticker} | {model} | using {col_note} | task={task_label} | full-only ===")
                with pd.option_context("display.max_rows", None, "display.max_columns", None):
                    print(df.round(round_decimals).to_string())

    return frames


import os
import re
import numpy as np
import pandas as pd

# assumes build_metric_frames(...) is already defined in scope

def _frames_to_long(frames_dict):
    """
    Convert nested frames {ticker: {model: DataFrame}} into a single long-form DataFrame:
    columns = [ticker, model, metric, horizon, value]
    """
    rows = []
    for ticker, models in frames_dict.items():
        for model, df in models.items():
            if not isinstance(df, pd.DataFrame) or df.empty:
                continue
            for horizon_col in df.columns:
                # Expect columns like "H1", "H5", "H20"
                m = re.match(r"^H(\d+)$", str(horizon_col))
                horizon = int(m.group(1)) if m else horizon_col
                for metric_name, val in df[horizon_col].items():
                    rows.append({
                        "ticker": ticker,
                        "model": model,
                        "metric": metric_name,
                        "horizon": horizon,
                        "value": float(val) if pd.notna(val) else np.nan,
                    })
    if rows:
        return pd.DataFrame(rows, columns=["ticker", "model", "metric", "horizon", "value"])
    return pd.DataFrame(columns=["ticker", "model", "metric", "horizon", "value"])


def export_metrics(
    results_store,
    out_path,
    *,
    task_name=None,
    outer_horizon="full",
    metrics_order=None,
    round_decimals=6,
    pretty_print=False,
):
    """Export metric frames built from results_store.

    The destination format is chosen by the extension of out_path:
    ".csv" -> a single long-form CSV; ".txt" -> a human-readable text report;
    anything else -> a directory of per-(ticker, model) CSVs plus a combined CSV.
    Returns a small dict describing what was written.
    """
    frames = build_metric_frames(
        results_store,
        task_name=task_name,
        outer_horizon=outer_horizon,
        metrics_order=metrics_order,
        round_decimals=round_decimals,
        pretty_print=pretty_print,
    )

    out_path = os.path.abspath(out_path)
    ext = os.path.splitext(out_path)[1].lower()

    if ext == ".csv":
        long_df = _frames_to_long(frames)
        long_df.to_csv(out_path, index=False)
        return {"mode": "single_csv", "path": out_path, "rows": len(long_df)}

    if ext == ".txt":
        sections = []
        for tkr, per_model in frames.items():
            for mdl, frame in per_model.items():
                sections.append(f"=== {tkr} | {mdl} ===")
                if isinstance(frame, pd.DataFrame) and not frame.empty:
                    sections.append(frame.round(round_decimals).to_string())
                else:
                    sections.append("(no data)")
                sections.append("")  # blank line
        with open(out_path, "w", encoding="utf-8") as fh:
            fh.write("\n".join(sections))
        return {"mode": "text", "path": out_path, "sections": sum(len(v) for v in frames.values())}

    # Any other extension: treat out_path as a directory target.
    os.makedirs(out_path, exist_ok=True)

    written = 0
    for tkr, per_model in frames.items():
        for mdl, frame in per_model.items():
            # Sanitize names so they are safe file-name components.
            safe_t = re.sub(r"[^A-Za-z0-9._-]+", "_", str(tkr))
            safe_m = re.sub(r"[^A-Za-z0-9._-]+", "_", str(mdl))
            target = os.path.join(out_path, f"{safe_t}__{safe_m}.csv")
            (frame if isinstance(frame, pd.DataFrame) else pd.DataFrame()).to_csv(target)
            written += 1
    combined_fp = os.path.join(out_path, "combined_metrics.csv")
    long_df = _frames_to_long(frames)
    long_df.to_csv(combined_fp, index=False)
    return {"mode": "folder", "dir": out_path, "files_written": written + 1, "combined_path": combined_fp}

Actual print¶

In [723]:
# Build and pretty-print the per-(ticker, model) metric tables across all horizons.
frames = build_metric_frames(results_store_test, outer_horizon="full", pretty_print=True)
=== EURUSD | GARCH | using all outer horizons [1, 20] | task=Task 1 | full-only ===
                 H1       H20
MAE        0.161310  0.159137
Pearson r  0.209875  0.215759
QLIKE      0.794253  0.796778
R2        -0.461172 -0.466228
RMSE       0.258249  0.253547

=== EURUSD | Simple_MLP | using all outer horizons [1, 20] | task=Task 1 | full-only ===
                 H1       H20
MAE        0.089198  0.092478
Pearson r  0.581967  0.431198
QLIKE      0.416104  0.474425
R2         0.319698  0.149509
RMSE       0.176214  0.193104

=== GOLD | GARCH | using all outer horizons [1, 20] | task=Task 1 | full-only ===
                 H1       H20
MAE        0.508063  0.486758
Pearson r  0.012804 -0.038307
QLIKE      0.651075  0.640301
R2        -0.889246 -0.436083
RMSE       0.900730  0.797085

=== GOLD | Simple_MLP | using all outer horizons [1, 20] | task=Task 1 | full-only ===
                 H1       H20
MAE        0.320071  0.347266
Pearson r  0.516100  0.374932
QLIKE      0.572122  0.586856
R2         0.247002  0.017984
RMSE       0.568653  0.659135

=== SP500 | GARCH | using all outer horizons [1, 20] | task=Task 1 | full-only ===
                 H1       H20
MAE        0.943624  0.960721
Pearson r -0.028312 -0.034362
QLIKE      1.434360  1.382856
R2        -0.137066 -0.120473
RMSE       3.019507  2.997934

=== SP500 | Simple_MLP | using all outer horizons [1, 20] | task=Task 1 | full-only ===
                 H1       H20
MAE        0.551227  0.648348
Pearson r  0.609738  0.194079
QLIKE      0.545685  0.854994
R2         0.339615  0.023575
RMSE       2.301133  2.798600

Plot helper¶

In [20]:
import os
import re
import numpy as np
import matplotlib.pyplot as plt

def _parse_horizon_columns(cols):
   
    numeric = {}
    has_full = False
    for c in cols:
        if isinstance(c, (int, np.integer)):
            numeric[int(c)] = c
        elif isinstance(c, str):
            if c.lower() == "full":
                has_full = True
            else:
                m = re.match(r"^[Hh](\d+)$", c)
                if m:
                    numeric[int(m.group(1))] = c
    return sorted(numeric.keys()), has_full, numeric

def plot_metric_vs_horizon(
    frames,
    tickers=None,
    models=None,
    metrics=None,
    include_full=True,
    save_dir=None,
    show=True,
    fig_size=(7, 4),
    dpi=150
):
    """Plot each metric against the forecast horizon, one figure per
    (ticker, metric), with one line (or marker) per model.

    Parameters
    ----------
    frames : {ticker: {model: DataFrame}} as produced by build_metric_frames;
        frame columns are horizon labels ("H1", ints, or "full"), rows are
        metric names.
    tickers, models, metrics : optional subsets; None means "all found".
    include_full : also draw the "full" column one x-step past the largest
        numeric horizon.
    save_dir : when set, PNGs are written there and their paths returned.
    show : display figures interactively; otherwise they are closed.
    fig_size, dpi : matplotlib figure size and save resolution.

    Returns
    -------
    list of file paths written (empty when save_dir is None).
    """
    # Default metric list: union of row labels over every frame.
    if metrics is None:
        mset = set()
        for t, by_model in frames.items():
            for _, df in by_model.items():
                mset |= set(df.index)
        metrics = sorted(mset)

    tickers = tickers or list(frames.keys())
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    saved_paths = []

    for t in tickers:
        if t not in frames:
            continue

        model_names = models or list(frames[t].keys())

        # Collect, per model, which numeric horizons and/or "full" columns exist.
        all_h = set()
        has_full_any = False
        per_model_maps = {}

        for m in model_names:
            if m not in frames[t]:
                continue
            df = frames[t][m]
            Hs, has_full, colmap = _parse_horizon_columns(df.columns)
            per_model_maps[m] = (Hs, has_full, colmap)
            all_h |= set(Hs)
            has_full_any |= has_full

        # Nothing plottable for this ticker.
        if not all_h and not has_full_any:
            continue

        xs_all = sorted(all_h)
        # "full" is drawn one step past the largest numeric horizon.
        x_full = xs_all[-1] + 1 if xs_all else 1

        for metric in metrics:
            plt.figure(figsize=fig_size)
            plotted_any = False

            # Case 1: at least one numeric horizon -> line plot over horizons.
            if xs_all:
                for m in model_names:
                    if m not in frames[t]:
                        continue
                    df = frames[t][m]
                    if metric not in df.index:
                        continue
                    Hs, has_full, colmap = per_model_maps[m]

                    # Build y values aligned to xs_all; missing horizons -> NaN.
                    ys = []
                    mask = []
                    for h in xs_all:
                        if h in colmap:
                            val = df.at[metric, colmap[h]] if colmap[h] in df.columns else np.nan
                        else:
                            val = np.nan
                        ys.append(val)
                        mask.append(np.isfinite(val))
                    ys = np.array(ys, dtype=float)
                    mask = np.array(mask, dtype=bool)
                    if mask.any():
                        plt.plot(np.array(xs_all)[mask], ys[mask], marker="o", label=m)
                        plotted_any = True

                # Overlay the "full" value as an 'x' marker per model.
                if include_full and has_full_any:
                    for m in model_names:
                        if m not in frames[t]:
                            continue
                        df = frames[t][m]
                        if metric in df.index and any(str(c).lower() == "full" for c in df.columns):
                            y_full = df.at[metric, "full"]
                            if np.isfinite(y_full):
                                plt.scatter([x_full], [y_full], marker="x", label=f"{m} (full)")
                                plotted_any = True

                plt.xlabel("Horizon")
                xticks = xs_all + ([x_full] if include_full and has_full_any else [])
                xticklabels = [str(h) for h in xs_all] + (["full"] if include_full and has_full_any else [])
                plt.xticks(xticks, xticklabels)

            # Case 2: only "full" columns exist -> one scatter point per model.
            elif include_full and has_full_any:
                x_pos = []
                y_vals = []
                labels = []
                for i, m in enumerate(model_names):
                    if m in frames[t] and metric in frames[t][m].index and "full" in frames[t][m].columns:
                        y = frames[t][m].at[metric, "full"]
                        if np.isfinite(y):
                            x_pos.append(i)
                            y_vals.append(y)
                            labels.append(m)
                if y_vals:
                    plt.scatter(x_pos, y_vals, marker="o")
                    plt.xticks(x_pos, labels, rotation=0)
                    plt.xlabel("Model")
                    plotted_any = True

            # Discard empty figures so they neither show nor save.
            if not plotted_any:
                plt.close()
                continue

            plt.title(f"{t} — {metric}")
            plt.ylabel(metric)
            plt.grid(True, alpha=0.3)
            plt.legend(loc="best")
            plt.tight_layout()

            if save_dir:
                # Sanitize the metric name so it is a safe file-name component.
                safe_metric = re.sub(r"[^A-Za-z0-9._-]+", "_", metric)
                fname = f"{t}__{safe_metric}.png"
                path = os.path.join(save_dir, fname)
                plt.savefig(path, dpi=dpi)
                saved_paths.append(path)

            if show:
                plt.show()
            else:
                plt.close()

    return saved_paths

Actual plot¶

In [725]:
# Generate one chart per (ticker, metric) and collect the saved file paths.
saved = plot_metric_vs_horizon(
    frames,
    tickers=None,          # all tickers in `frames`
    models=None,           # all models
    metrics=None,          # all metrics found
    include_full=True,
    save_dir="plots/metrics",
    show=True
)
print("Saved", len(saved), "charts to plots/metrics")
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
Saved 15 charts to plots/metrics

Helpers for plotting actual vs. predicted y¶

In [21]:
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

def _get_hdict(store, ticker, horizon=1):
    byH = store.get(ticker, {})
    if horizon in byH: return byH[horizon]
    k1 = str(horizon)
    if k1 in byH: return byH[k1]
    k2 = f"H{horizon}"
    if k2 in byH: return byH[k2]
    return {}

def _find_y_blob(payload):
    if isinstance(payload, dict) and "y_data" in payload:
        return payload["y_data"]
    return None

def _pick_full_step_y(y_blob):
    if y_blob is None:
        return None, None
    a = np.asarray(y_blob)
    if a.ndim != 3:
        if a.ndim == 2:
            if a.shape[1] >= 2: return a[:, 0], a[:, 1]
            if a.shape[1] == 1: return None, a[:, 0]
        elif a.ndim == 1:
            return None, a
        return None, None

    B, T, F = a.shape
    last = a[:, T-1, :]
    if F >= 2:
        return last[:, 0], last[:, 1]
    elif F == 1:
        return None, last[:, 0]
    return None, None

def plot_h1_full_from_results(
    results_store,
    tickers=None,
    models=None,
    save_dir=None,
    show=True,
    fig_size=(9, 4),
    dpi=150,
    verbose=True
):
    """Plot the actual series against each model's prediction at horizon H=1.

    For every ticker with H=1 entries, extracts the last-step (actual,
    predicted) pair from each model's "y_data" blob, prints a preview of the
    aligned series, and draws one figure per ticker.

    Parameters
    ----------
    results_store : {ticker: {horizon: {model: payload}}}; payload carries
        "y_data" — assumed (B, T, F) with feature 0 = actual, 1 = predicted.
    tickers, models : optional subsets; None means "all available".
    save_dir : when set, a PNG per ticker is written there.
    show : display figures; otherwise close them after saving.
    fig_size, dpi : matplotlib figure size and save resolution.
    verbose : print per-model extraction diagnostics.

    Returns
    -------
    list of saved file paths (empty when save_dir is None).
    """
    all_tickers = [t for t in results_store if _get_hdict(results_store, t, 1)]
    tickers = all_tickers if tickers is None else [t for t in tickers if t in all_tickers]

    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    saved = []
    for t in tickers:
        hdict = _get_hdict(results_store, t, 1)
        if not hdict:
            # BUG FIX: the original called f(...) as a function — NameError
            # whenever this branch was hit — instead of using an f-string.
            if verbose: print(f"[skip] {t}: no models at H=1")
            continue

        model_names = list(hdict.keys()) if models is None else [m for m in models if m in hdict]

        # The first model that yields an actual series provides the reference truth.
        y_true_ref = None
        preds = {}
        lens = []

        for m in model_names:
            y_blob = _find_y_blob(hdict[m])
            yt, yp = _pick_full_step_y(y_blob)
            if yt is not None:
                y_true_ref = yt
                lens.append(len(yt))
                break

        # Collect predictions from every model; remember which had none.
        missing = []
        for m in model_names:
            y_blob = _find_y_blob(hdict[m])
            yt, yp = _pick_full_step_y(y_blob)
            if yp is not None and yp.size > 0:
                preds[m] = yp
                lens.append(len(yp))
            else:
                missing.append(m)
            if verbose:
                shape = None
                try:
                    shape = np.asarray(y_blob).shape
                except Exception:  # diagnostics only — never abort the plot
                    pass
                print(f"  [{t} H=1] {m}: blob_shape={shape}, "
                      f"yt={'ok' if yt is not None else 'none'}, "
                      f"yp={'ok' if yp is not None else 'none'}")

        if verbose:
            msg = f"[{t} H=1] actual=" + ("YES" if y_true_ref is not None else "NO")
            if missing: msg += " | no_pred=" + ",".join(missing)
            print(msg)

        if not preds and y_true_ref is None:
            if verbose: print(f"[skip] {t}: nothing to plot at H=1")
            continue

        # Align everything to the shortest available series (tail-aligned).
        L = min(lens) if lens else None
        if not L or L <= 0:
            if verbose: print(f"[skip] {t}: invalid lengths")
            continue

        series = {}
        if y_true_ref is not None and len(y_true_ref) >= L:
            series["Actual"] = y_true_ref[-L:]
        for m, yp in preds.items():
            if len(yp) >= L:
                series[f"{m}_pred"] = yp[-L:]

        # Preview the aligned data so misalignment/outliers are easy to spot.
        df_head = pd.DataFrame(series)
        head_rows = min(50, len(df_head))
        print(f"\n[{t} H=1] Aligned series head (first {head_rows} of {len(df_head)} rows):")
        with pd.option_context("display.max_rows", 50, "display.width", 120):
            print(df_head.head(50).to_string(index=False, float_format=lambda v: f"{v:.6g}"))

        x = np.arange(L)
        plt.figure(figsize=fig_size)

        if "Actual" in series:
            plt.plot(x, series["Actual"], label="Actual (full)", linewidth=2)

        for m in model_names:
            key = f"{m}_pred"
            if key in series:
                plt.plot(x, series[key], '--', label=f"{m} pred (full)")

        plt.title(f"{t} — Horizon=1 (last step from y_data[..., 1])")
        plt.xlabel("Test index"); plt.ylabel("Value")
        plt.grid(True, alpha=0.3); plt.legend(loc="best"); plt.tight_layout()

        if save_dir:
            out = os.path.join(save_dir, f"{t}_H1_full_actual_vs_models.png")
            plt.savefig(out, dpi=dpi)
            saved.append(out)

        if show: plt.show()
        else: plt.close()

    return saved

Actual y plots¶

In [727]:
# Draw actual-vs-predicted H=1 plots for every ticker/model in the test store.
_ = plot_h1_full_from_results(
    results_store_test,
    tickers=None,
    models=None,
    save_dir="plots/h1_full",
    show=True,
    verbose=True
)
  [EURUSD H=1] Simple_MLP: blob_shape=(757, 1, 2), yt=ok, yp=ok
  [EURUSD H=1] GARCH: blob_shape=(757, 1, 2), yt=ok, yp=ok
[EURUSD H=1] actual=YES

[EURUSD H=1] Aligned series head (first 50 of 757 rows):
    Actual  Simple_MLP_pred  GARCH_pred
  0.739859         0.456605    0.194402
  0.743822         0.439917    0.182659
   1.02363         0.293959    0.163768
 0.0199331        0.0373925    0.147496
   0.52303          0.51497    0.130205
  0.355217         0.449064    0.115887
  0.417086         0.470106    0.155131
  0.266624         0.591673    0.138263
  0.312901         0.575305    0.131967
0.00443731        0.0209627    0.119333
  0.261753         0.306895    0.108023
  0.788937         0.360136    0.102587
  0.598342         0.553496   0.0934497
  0.723261         0.476556   0.0969616
  0.341589         0.207176   0.0889363
 0.0137579        0.0302896   0.0826681
   0.29405         0.230426    0.108359
  0.513047         0.461021   0.0988499
  0.252839         0.565897    0.100185
  0.305402         0.458735    0.094313
  0.226156          0.24895    0.089753
0.00963577        0.0291474   0.0848477
 0.0784947         0.346752    0.082161
  0.199036         0.360084    0.200495
  0.171632         0.273153    0.178106
  0.201223         0.400656    0.171009
  0.353565         0.134514    0.155538
 0.0670925        0.0161179    0.215442
    0.2403         0.253816    0.199108
  0.443555         0.464334    0.174538
  0.418858         0.370152    0.154062
  0.378402          0.32185     0.15851
  0.644091         0.355531    0.139604
 0.0152947          0.03466    0.128518
  0.272258         0.223135    0.114498
   0.23458         0.187277    0.104534
  0.249941         0.402456    0.100107
  0.705648         0.390593    0.104415
  0.293732         0.279373    0.100611
0.00988965        0.0137109   0.0980077
  0.164063         0.225401   0.0984769
  0.414405         0.268784    0.100162
  0.477121         0.314562    0.115875
  0.264156         0.335598    0.126115
  0.210154         0.376182    0.116968
 0.0136265        0.0204071    0.108811
  0.265973         0.252806    0.101425
  0.234507         0.390409   0.0990592
  0.284101           0.3602   0.0973021
  0.226239         0.179596    0.100023
No description has been provided for this image
  [GOLD H=1] Simple_MLP: blob_shape=(1107, 1, 2), yt=ok, yp=ok
  [GOLD H=1] GARCH: blob_shape=(1107, 1, 2), yt=ok, yp=ok
[GOLD H=1] actual=YES

[GOLD H=1] Aligned series head (first 50 of 1107 rows):
   Actual  Simple_MLP_pred  GARCH_pred
 0.983207         0.340429    0.404734
0.0725466         0.475195     0.41204
 0.365228         0.153136    0.386377
 0.675363         0.445471    0.388799
  1.19577         0.713868    0.379988
 0.396614          1.24502    0.475619
 0.569428         0.501321    0.422518
0.0394832         0.532566    0.327936
 0.540289         0.543877    0.339051
 0.645375         0.715095    0.345618
 0.596069         0.919522    0.319951
 0.496734         0.646822    0.430255
 0.986594         0.271303     0.39405
0.0170706        0.0102254    0.317561
 0.388559         0.629708    0.318934
 0.616087         0.655644    0.332173
 0.603893         0.763518    0.327931
 0.739671         0.583307    0.429513
 0.832053         0.566336    0.361713
0.0110286        0.0107584     0.31836
 0.506412         0.487149    0.343133
 0.392005         0.611626    0.317323
  0.80787         0.858652    0.317796
 0.535457         0.559314    0.432284
 0.837414         0.462319    0.329636
0.0220968        0.0102691    0.341653
 0.498523         0.593219    0.378154
 0.472977         0.521982    0.333948
  1.61389         0.763689    0.327132
 0.641823         0.627251     0.43009
 0.600193         0.816263    0.335509
0.0226182        0.0113437    0.337952
 0.411008         0.631846    0.321095
 0.743739         0.569564    0.319928
 0.369762         0.762199    0.319878
  0.57285         0.516212    0.437624
  1.20635          0.31281    0.318503
0.0659951       0.00812005     0.33278
  1.33651         0.657572    0.321075
 0.970258          0.81574    0.336696
 0.553171         0.990921    0.331243
 0.193903         0.347003    0.415988
  1.26963         0.742318    0.340967
 0.222961        0.0108863    0.297523
 0.483934         0.723005    0.344531
   1.3403         0.842585    0.318788
  1.06602          1.08537    0.297852
 0.692459         0.207267    0.401388
 0.757949         0.685486    0.307926
0.0295473        0.0266834    0.300452
No description has been provided for this image
  [SP500 H=1] Simple_MLP: blob_shape=(694, 1, 2), yt=ok, yp=ok
  [SP500 H=1] GARCH: blob_shape=(694, 1, 2), yt=ok, yp=ok
[SP500 H=1] actual=YES

[SP500 H=1] Aligned series head (first 50 of 694 rows):
   Actual  Simple_MLP_pred  GARCH_pred
   1.0351          1.02346     2.38582
  1.03491          1.90407     2.27062
 0.818223          1.22392     2.22203
0.0217497         0.095521     2.64515
 0.502587         0.521083     2.22968
  2.13502          1.15246     2.12537
 0.903485          1.68215     2.31461
 0.898743         0.924198     2.09158
 0.904151          1.07291     2.48451
0.0405714        0.0543168     2.08508
 0.113815          0.41974     2.00842
  0.60793         0.676438     1.99926
  0.92855         0.553493      2.0014
 0.794209         0.416095     2.60646
 0.910428         0.233005     2.21111
0.0104435        0.0328714     2.03453
 0.610551         0.589967     2.30905
 0.532918         0.802979     2.05008
 0.832645         0.551246     2.22178
 0.902649         0.608186     2.66556
 0.540703         0.609466     2.51951
0.0320152        0.0198967     2.27669
  0.44739         0.526564     2.08586
  1.17379         0.444389     2.13541
 0.784725         0.616858     2.18155
  1.19858         0.948593     2.68093
   3.1834         0.348121     2.14288
  1.95774         0.112661     2.12631
  5.83359          2.93549     2.15689
  1.91438          5.05136     2.04765
  2.82295          4.47707     2.26379
  1.97836          4.49097     2.64127
  1.67848          4.21192     2.20808
 0.231827          1.26015     2.17517
  1.99306          2.11611     2.04889
 0.595525          2.37936     2.02309
   2.0958          1.78721     1.96395
  1.44715          2.04939     2.59256
   1.8779          1.87158     2.23315
0.0836623         0.228839     1.99344
 0.723351          1.52607     2.11009
 0.463055          1.67432     2.11381
 0.331959          1.61614     2.17244
 0.355102         0.664165      2.6587
 0.359515         0.510746      2.3202
0.0698865          0.17361     1.95308
 0.418851         0.598284     2.09169
 0.479569         0.728678     1.78404
 0.468942         0.659265     1.75529
 0.278331         0.823747     1.79928
No description has been provided for this image

Extract best parameters¶

In [136]:
from collections import Counter

def extract_best_params_global(results_store_subset, model_list):
    """For each model, return the most frequently used parameter dict across
    every (ticker, horizon) run in the store."""
    winners = {}
    for model in model_list:
        observed = []
        for byH in results_store_subset.values():
            for byM in byH.values():
                if model in byM and "used_params" in byM[model]:
                    # tuples are hashable, so they can be counted
                    observed.append(tuple(sorted(byM[model]["used_params"].items())))
        if observed:
            top, _ = Counter(observed).most_common(1)[0]
            winners[model] = dict(top)
    return winners


def extract_best_params_by_horizon(results_store_subset, model_list):
    """Like extract_best_params_global, but the vote is taken separately per
    horizon: returns {model: {horizon: most common used_params dict}}."""
    result = {}
    for model in model_list:
        votes_per_h = {}
        for byH in results_store_subset.values():
            for H, byM in byH.items():
                if model in byM and "used_params" in byM[model]:
                    frozen = tuple(sorted(byM[model]["used_params"].items()))
                    votes_per_h.setdefault(H, []).append(frozen)
        chosen = {}
        for H, ballots in votes_per_h.items():
            if ballots:
                best_tuple, _ = Counter(ballots).most_common(1)[0]
                chosen[H] = dict(best_tuple)
        if chosen:
            result[model] = chosen
    return result


def merge_fixed(FIXED_PARAMS, tuned_global):
    """Overlay tuned parameters on top of the fixed defaults.

    Works on copies, so neither input dict is mutated."""
    combined = {name: dict(params) for name, params in FIXED_PARAMS.items()}
    for name, overrides in tuned_global.items():
        combined.setdefault(name, {}).update(overrides)
    return combined


def collect_final_params(results_store, *, fallback_to_model=True):
    """Gather the parameters actually used per (model, ticker, horizon).

    Prefers payload["used_params"]; when that is absent/empty and
    fallback_to_model is True, tries payload["model"].get_params().
    Returns {model_name: {ticker: {horizon: params_dict}}}.
    """
    collected = {}
    for ticker, byH in results_store.items():
        for horizon, byM in byH.items():
            for model_name, payload in byM.items():

                params = payload.get("used_params")
                if not params and fallback_to_model:
                    candidate = payload.get("model")
                    if hasattr(candidate, "get_params"):
                        try:
                            params = candidate.get_params()
                        except Exception:
                            params = None

                if not params:
                    continue

                per_ticker = collected.setdefault(model_name, {})
                per_ticker.setdefault(ticker, {})[horizon] = dict(params)
    return collected


def print_final_params(params_by_model, *, sort_models=True, sort_tickers=True, sort_horizons=True):
    """Pretty-print the nested {model: {ticker: {horizon: params}}} structure,
    with optional alphabetical/numeric ordering at each level."""
    models = sorted(params_by_model) if sort_models else params_by_model.keys()
    for model in models:
        print(f"\n=== {model} ===")
        per_ticker = params_by_model[model]
        ticker_names = sorted(per_ticker) if sort_tickers else per_ticker.keys()
        for tkr in ticker_names:
            per_horizon = per_ticker[tkr]
            horizons = sorted(per_horizon) if sort_horizons else per_horizon.keys()
            for H in horizons:
                print(f"{tkr} | H={H}:")
                for key, value in sorted(per_horizon[H].items()):
                    print(f"  {key}: {value}")


def params_to_dataframe(params_by_model):
    """Flatten {model: {ticker: {horizon: params}}} into a tidy DataFrame with
    one row per (model, ticker, horizon) and one column per parameter."""
    import pandas as pd
    records = []
    for model, per_ticker in params_by_model.items():
        for tkr, per_horizon in per_ticker.items():
            for H, params in per_horizon.items():
                records.append({"model": model, "ticker": tkr, "horizon": H, **params})
    return pd.DataFrame(records)


def filter_tuned_params(tuned: dict) -> dict:
    """Strip bookkeeping/plumbing keys from each model's tuned parameter dict,
    keeping only true hyperparameters."""
    excluded = {
        "no_tasks", "input_dim", "output_dim", "time_horizon",
        "merge_price_time", "flatten", "use_nested_cv", "single_holdout",
        "device", "verbose"
    }
    return {
        model: {key: val for key, val in params.items() if key not in excluded}
        for model, params in tuned.items()
    }

Results on 1 task models¶

In [137]:
# Load the structured data dictionary (version 8) and unpack per-ticker arrays.
# NOTE(review): `root_folder`, `objects_relative_path`, `pickle`, `os`, `np`
# must all come from earlier cells — confirm they are in scope.
load_data_object_8_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_8.pkl")

with open(load_data_object_8_file_path, "rb") as f:
    structured_data_dict_8 = pickle.load(f)  # trusted self-produced pickle

print("Data dictionary 8 loaded successfully.")

tickers  = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]


# Per-ticker arrays keyed by ticker symbol.
X_price_map, X_time_map, y_map = {}, {}, {}

missing = []
for t in tickers:
    if t not in structured_data_dict_8:
        missing.append(t)
        continue

    entry = structured_data_dict_8[t]

    Xp = entry.get("X_other", None)  # non-time features
    Xt = entry.get("X_time",  None)  # time features (optional)
    Y  = entry.get("y",       None)  # targets

    # A ticker without features or targets cannot be used downstream.
    if Xp is None or Y is None:
        print(f"[WARN] {t}: missing {'X_other' if Xp is None else ''}{' and ' if (Xp is None and Y is None) else ''}{'y' if Y is None else ''} → skipping.")
        continue

    Xp = np.asarray(Xp, dtype=float)
    Xt = None if Xt is None else np.asarray(Xt, dtype=float)
    Y  = np.asarray(Y,  dtype=float)

    # All arrays are expected to be 3-D; exact axis meaning not shown here —
    # presumably (samples, window, features). TODO confirm.
    if Xp.ndim != 3 or Y.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_other={Xp.shape}, y={Y.shape} → skipping.")
        continue
    if Xt is not None and Xt.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_time={Xt.shape} → setting to None.")
        Xt = None

    X_price_map[t] = Xp
    X_time_map[t]  = Xt
    y_map[t]       = Y

if missing:
    print(f"[INFO] Missing tickers in data dict (skipped): {missing}")
Data dictionary 8 loaded successfully.
In [ ]:
# --- Hyper-parameter tuning: scope -------------------------------------------
# models_to_tune = ["Simple_MLP", "Simple_KAN", "Simple_LSTM"]
models_to_tune = ["Simple_KAN"]
tune_tickers   = ["EURUSD"]
tune_horizons  = [20]

# Search space per model; only entries for `models_to_tune` are actually used.
param_grid_tune = {
    "Simple_MLP": {
        "lr": [5e-4, 5e-3],
        "batch_size": [128, 512],
        "hidden_layers": [2, 4],
        "hidden_dim": [32, 64],
    },
    "Simple_KAN": {
        "lr": [5e-4, 5e-3],
        "batch_size": [128, 512],
        "knots": [10, 20],
        "spline_power": [5, 7],
        "hidden_dim": [32, 64],
        "hidden_layers": [2, 4],
    },
    "Simple_LSTM": {
        "lr": [5e-4, 5e-3],
        "batch_size": [16, 32],
    },
    "ITransformer": {
        "lr": [1e-6, 1e-5, 1e-4, 1e-3],
        "batch_size": [64, 256],
        "d_model": [64, 128],
        "d_ff": [128, 256],
        "n_heads": [4, 6],
        "e_layers": [2, 4],
    },
}

# Baseline hyper-parameters; grid-search winners override these via merge_fixed.
FIXED_PARAMS = {
    "Simple_MLP": {
        "lr": 5e-4, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 128,
        "hidden_layers": 3, "hidden_dim": 128,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "Simple_LSTM": {
        "lr": 5e-4, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 16,
        "hidden_layers": 3, "hidden_dim": 32,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "Simple_KAN": {
        "lr": 5e-4, "dropout": 0.0, "l2_weight": 1e-5, "batch_size": 128,
        "hidden_layers": 3, "hidden_dim": 128, "knots": 8, "spline_power": 5,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
    "ITransformer": {
        "lr": 1e-4, "l2_weight": 1e-5, "batch_size": 256,
        "d_model": 128, "d_ff": 256, "n_heads": 4, "e_layers": 2, "dropout": 0.0,
        "epochs": 50, "patience": 10, "min_epochs": 30, "min_delta": 1e-4,
    },
}

# Input layout per model: whether price/time inputs are merged and whether
# the (window, features) block is flattened to a vector.
MODEL_IO = {
    "Simple_MLP":   {"merge_price_time": False, "flatten": True},
    "Simple_KAN":   {"merge_price_time": False, "flatten": True},
    "Simple_LSTM":  {"merge_price_time": False, "flatten": False},
    "ITransformer": {"merge_price_time": False, "flatten": False},
}

# Settings shared by every run (tuning and final).
COMMON = {
    "use_nested_cv": False,
    "single_holdout": False,
    "normalize_X": True,
    "normalize_Time": True,
    "normalize_y": True,
    "verbose": True,
    "target_mode": "log_mse",
}

# No multi-task models in this experiment.
MULTI_TASK_MODELS = set()

# Accumulates results across both the tuning pass and the final pass.
final_results_store = {}

TUNE_BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "tune_saved_object")
os.makedirs(TUNE_BASE_SAVE_DIR, exist_ok=True)

# --- Pass 1: grid search on the tuning ticker/horizon ------------------------
tune_store = run_all_models_for_all(
    tickers=tune_tickers,
    horizons=tune_horizons,
    model_list=models_to_tune,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=TUNE_BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=param_grid_tune,
    results_store=final_results_store,
    overwrite=True,
    run_grid_with_single_holdout=True,
)

# Pick the globally best params per model, drop bookkeeping keys, and merge
# them over the fixed baselines.
tuned_global = extract_best_params_global(tune_store, models_to_tune)
tuned_global_filtered = filter_tuned_params(tuned_global)
FIXED_PARAMS_MERGED = merge_fixed(FIXED_PARAMS, tuned_global_filtered)

# --- Pass 2: final runs with tuned params on all tickers/horizons ------------
final_tickers  = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
# final_models   = ["Simple_MLP", "Simple_KAN", "Simple_LSTM", "ITransformer"]
final_models   = ["Simple_KAN"]
final_horizons = [1, 5, 10, 20]

FINAL_BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "final_results_saved_object")
os.makedirs(FINAL_BASE_SAVE_DIR, exist_ok=True)

_ = run_all_models_for_all(
    tickers=final_tickers,
    horizons=final_horizons,
    model_list=final_models,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=FINAL_BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS_MERGED,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=None,
    results_store=final_results_store,
    overwrite=True,
)
=== EURUSD | H=20 | Simple_KAN (grid + single-holdout) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Single holdout: one train/test split, param search on a single train/val (early-stop) split.
Using device: mps
Epoch 1: Train mse = 0.9958 | Val mse = 1.1893
Epoch 2: Train mse = 0.9590 | Val mse = 1.0539
Epoch 3: Train mse = 0.9110 | Val mse = 0.9542
Epoch 4: Train mse = 0.8786 | Val mse = 0.9305
Epoch 5: Train mse = 0.8502 | Val mse = 0.9000
Epoch 6: Train mse = 0.8155 | Val mse = 0.8489
Epoch 7: Train mse = 0.7548 | Val mse = 0.7776
Epoch 8: Train mse = 0.6818 | Val mse = 0.7141
Epoch 9: Train mse = 0.6113 | Val mse = 0.6568
Epoch 10: Train mse = 0.5528 | Val mse = 0.6156
Epoch 11: Train mse = 0.5004 | Val mse = 0.5631
Epoch 12: Train mse = 0.4478 | Val mse = 0.5243
Epoch 13: Train mse = 0.4025 | Val mse = 0.4747
Epoch 14: Train mse = 0.3654 | Val mse = 0.4442
Epoch 15: Train mse = 0.3423 | Val mse = 0.4199
Epoch 16: Train mse = 0.3201 | Val mse = 0.4030
Epoch 17: Train mse = 0.3057 | Val mse = 0.3910
Epoch 18: Train mse = 0.2950 | Val mse = 0.3834
Epoch 19: Train mse = 0.2879 | Val mse = 0.3710
Epoch 20: Train mse = 0.2804 | Val mse = 0.3630
Epoch 21: Train mse = 0.2757 | Val mse = 0.3590
Epoch 22: Train mse = 0.2727 | Val mse = 0.3554
Epoch 23: Train mse = 0.2683 | Val mse = 0.3491
Epoch 24: Train mse = 0.2624 | Val mse = 0.3549
Epoch 25: Train mse = 0.2603 | Val mse = 0.3518
Epoch 26: Train mse = 0.2540 | Val mse = 0.3510
Epoch 27: Train mse = 0.2502 | Val mse = 0.3509
Epoch 28: Train mse = 0.2480 | Val mse = 0.3494
Epoch 29: Train mse = 0.2465 | Val mse = 0.3497
Epoch 30: Train mse = 0.2416 | Val mse = 0.3523
Epoch 31: Train mse = 0.2382 | Val mse = 0.3516
Epoch 32: Train mse = 0.2366 | Val mse = 0.3497
Epoch 33: Train mse = 0.2323 | Val mse = 0.3493
Early stopping triggered at epoch 33.
Using device: mps
Epoch 1: Train mse = 0.9960 | Val mse = 1.1902
Epoch 2: Train mse = 0.9599 | Val mse = 1.0556
Epoch 3: Train mse = 0.9119 | Val mse = 0.9553
Epoch 4: Train mse = 0.8798 | Val mse = 0.9309
Epoch 5: Train mse = 0.8526 | Val mse = 0.9012
Epoch 6: Train mse = 0.8219 | Val mse = 0.8532
Epoch 7: Train mse = 0.7654 | Val mse = 0.7817
Epoch 8: Train mse = 0.6922 | Val mse = 0.7163
Epoch 9: Train mse = 0.6200 | Val mse = 0.6583
Epoch 10: Train mse = 0.5610 | Val mse = 0.6182
Epoch 11: Train mse = 0.5092 | Val mse = 0.5649
Epoch 12: Train mse = 0.4566 | Val mse = 0.5254
Epoch 13: Train mse = 0.4104 | Val mse = 0.4735
Epoch 14: Train mse = 0.3717 | Val mse = 0.4418
Epoch 15: Train mse = 0.3473 | Val mse = 0.4164
Epoch 16: Train mse = 0.3240 | Val mse = 0.3988
Epoch 17: Train mse = 0.3091 | Val mse = 0.3860
Epoch 18: Train mse = 0.2982 | Val mse = 0.3785
Epoch 19: Train mse = 0.2912 | Val mse = 0.3653
Epoch 20: Train mse = 0.2839 | Val mse = 0.3580
Epoch 21: Train mse = 0.2794 | Val mse = 0.3531
Epoch 22: Train mse = 0.2765 | Val mse = 0.3494
Epoch 23: Train mse = 0.2723 | Val mse = 0.3431
Epoch 24: Train mse = 0.2665 | Val mse = 0.3487
Epoch 25: Train mse = 0.2647 | Val mse = 0.3453
Epoch 26: Train mse = 0.2587 | Val mse = 0.3444
Epoch 27: Train mse = 0.2550 | Val mse = 0.3443
Epoch 28: Train mse = 0.2533 | Val mse = 0.3430
Epoch 29: Train mse = 0.2520 | Val mse = 0.3416
Epoch 30: Train mse = 0.2470 | Val mse = 0.3441
Epoch 31: Train mse = 0.2438 | Val mse = 0.3439
Epoch 32: Train mse = 0.2426 | Val mse = 0.3412
Epoch 33: Train mse = 0.2383 | Val mse = 0.3406
Epoch 34: Train mse = 0.2357 | Val mse = 0.3414
Epoch 35: Train mse = 0.2334 | Val mse = 0.3457
Epoch 36: Train mse = 0.2312 | Val mse = 0.3470
Epoch 37: Train mse = 0.2302 | Val mse = 0.3443
Epoch 38: Train mse = 0.2262 | Val mse = 0.3493
Epoch 39: Train mse = 0.2255 | Val mse = 0.3498
Epoch 40: Train mse = 0.2233 | Val mse = 0.3549
Epoch 41: Train mse = 0.2217 | Val mse = 0.3543
Epoch 42: Train mse = 0.2181 | Val mse = 0.3586
Epoch 43: Train mse = 0.2158 | Val mse = 0.3586
Early stopping triggered at epoch 43.
Using device: mps
Epoch 1: Train mse = 0.8984 | Val mse = 0.8299
Epoch 2: Train mse = 0.6341 | Val mse = 0.5931
Epoch 3: Train mse = 0.3898 | Val mse = 0.3901
Epoch 4: Train mse = 0.3132 | Val mse = 0.3456
Epoch 5: Train mse = 0.2846 | Val mse = 0.3434
Epoch 6: Train mse = 0.2624 | Val mse = 0.3446
Epoch 7: Train mse = 0.2496 | Val mse = 0.3591
Epoch 8: Train mse = 0.2371 | Val mse = 0.3527
Epoch 9: Train mse = 0.2284 | Val mse = 0.3724
Epoch 10: Train mse = 0.2179 | Val mse = 0.3744
Epoch 11: Train mse = 0.2120 | Val mse = 0.3737
Epoch 12: Train mse = 0.2061 | Val mse = 0.3731
Epoch 13: Train mse = 0.2010 | Val mse = 0.3754
Epoch 14: Train mse = 0.1946 | Val mse = 0.3725
Epoch 15: Train mse = 0.1928 | Val mse = 0.3833
Epoch 16: Train mse = 0.1894 | Val mse = 0.3873
Epoch 17: Train mse = 0.1824 | Val mse = 0.3862
Epoch 18: Train mse = 0.1743 | Val mse = 0.3978
Epoch 19: Train mse = 0.1693 | Val mse = 0.3986
Epoch 20: Train mse = 0.1647 | Val mse = 0.4133
Epoch 21: Train mse = 0.1643 | Val mse = 0.4058
Epoch 22: Train mse = 0.1628 | Val mse = 0.4133
Epoch 23: Train mse = 0.1589 | Val mse = 0.4081
Epoch 24: Train mse = 0.1586 | Val mse = 0.4052
Epoch 25: Train mse = 0.1548 | Val mse = 0.4178
Epoch 26: Train mse = 0.1526 | Val mse = 0.4215
Epoch 27: Train mse = 0.1538 | Val mse = 0.4277
Epoch 28: Train mse = 0.1535 | Val mse = 0.4104
Epoch 29: Train mse = 0.1523 | Val mse = 0.4152
Epoch 30: Train mse = 0.1481 | Val mse = 0.4124
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9010 | Val mse = 0.8331
Epoch 2: Train mse = 0.6454 | Val mse = 0.6030
Epoch 3: Train mse = 0.3981 | Val mse = 0.3894
Epoch 4: Train mse = 0.3156 | Val mse = 0.3413
Epoch 5: Train mse = 0.2844 | Val mse = 0.3396
Epoch 6: Train mse = 0.2635 | Val mse = 0.3389
Epoch 7: Train mse = 0.2518 | Val mse = 0.3530
Epoch 8: Train mse = 0.2397 | Val mse = 0.3462
Epoch 9: Train mse = 0.2320 | Val mse = 0.3576
Epoch 10: Train mse = 0.2235 | Val mse = 0.3705
Epoch 11: Train mse = 0.2187 | Val mse = 0.3713
Epoch 12: Train mse = 0.2111 | Val mse = 0.3660
Epoch 13: Train mse = 0.2015 | Val mse = 0.3666
Epoch 14: Train mse = 0.1938 | Val mse = 0.3804
Epoch 15: Train mse = 0.1891 | Val mse = 0.3795
Epoch 16: Train mse = 0.1861 | Val mse = 0.3873
Epoch 17: Train mse = 0.1818 | Val mse = 0.3925
Epoch 18: Train mse = 0.1760 | Val mse = 0.3946
Epoch 19: Train mse = 0.1728 | Val mse = 0.3973
Epoch 20: Train mse = 0.1693 | Val mse = 0.3957
Epoch 21: Train mse = 0.1689 | Val mse = 0.4028
Epoch 22: Train mse = 0.1652 | Val mse = 0.4144
Epoch 23: Train mse = 0.1636 | Val mse = 0.4066
Epoch 24: Train mse = 0.1651 | Val mse = 0.4014
Epoch 25: Train mse = 0.1624 | Val mse = 0.3967
Epoch 26: Train mse = 0.1572 | Val mse = 0.4103
Epoch 27: Train mse = 0.1509 | Val mse = 0.4184
Epoch 28: Train mse = 0.1504 | Val mse = 0.4138
Epoch 29: Train mse = 0.1502 | Val mse = 0.4036
Epoch 30: Train mse = 0.1464 | Val mse = 0.4195
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9931 | Val mse = 1.1859
Epoch 2: Train mse = 0.9542 | Val mse = 1.0484
Epoch 3: Train mse = 0.9099 | Val mse = 0.9627
Epoch 4: Train mse = 0.8770 | Val mse = 0.9211
Epoch 5: Train mse = 0.8316 | Val mse = 0.8733
Epoch 6: Train mse = 0.7821 | Val mse = 0.8249
Epoch 7: Train mse = 0.7292 | Val mse = 0.7807
Epoch 8: Train mse = 0.6763 | Val mse = 0.7408
Epoch 9: Train mse = 0.6151 | Val mse = 0.6847
Epoch 10: Train mse = 0.5534 | Val mse = 0.6306
Epoch 11: Train mse = 0.4958 | Val mse = 0.5810
Epoch 12: Train mse = 0.4363 | Val mse = 0.5417
Epoch 13: Train mse = 0.3890 | Val mse = 0.5000
Epoch 14: Train mse = 0.3511 | Val mse = 0.4698
Epoch 15: Train mse = 0.3251 | Val mse = 0.4469
Epoch 16: Train mse = 0.3064 | Val mse = 0.4233
Epoch 17: Train mse = 0.2956 | Val mse = 0.4130
Epoch 18: Train mse = 0.2807 | Val mse = 0.4053
Epoch 19: Train mse = 0.2737 | Val mse = 0.3990
Epoch 20: Train mse = 0.2609 | Val mse = 0.3964
Epoch 21: Train mse = 0.2572 | Val mse = 0.3911
Epoch 22: Train mse = 0.2506 | Val mse = 0.3908
Epoch 23: Train mse = 0.2412 | Val mse = 0.3873
Epoch 24: Train mse = 0.2331 | Val mse = 0.3856
Epoch 25: Train mse = 0.2266 | Val mse = 0.3862
Epoch 26: Train mse = 0.2204 | Val mse = 0.3865
Epoch 27: Train mse = 0.2148 | Val mse = 0.3874
Epoch 28: Train mse = 0.2104 | Val mse = 0.3877
Epoch 29: Train mse = 0.2079 | Val mse = 0.3872
Epoch 30: Train mse = 0.2027 | Val mse = 0.3906
Epoch 31: Train mse = 0.1974 | Val mse = 0.3889
Epoch 32: Train mse = 0.1950 | Val mse = 0.3950
Epoch 33: Train mse = 0.1892 | Val mse = 0.3958
Epoch 34: Train mse = 0.1869 | Val mse = 0.3958
Early stopping triggered at epoch 34.
Using device: mps
Epoch 1: Train mse = 0.9934 | Val mse = 1.1869
Epoch 2: Train mse = 0.9547 | Val mse = 1.0483
Epoch 3: Train mse = 0.9100 | Val mse = 0.9627
Epoch 4: Train mse = 0.8774 | Val mse = 0.9207
Epoch 5: Train mse = 0.8320 | Val mse = 0.8719
Epoch 6: Train mse = 0.7814 | Val mse = 0.8215
Epoch 7: Train mse = 0.7292 | Val mse = 0.7779
Epoch 8: Train mse = 0.6791 | Val mse = 0.7406
Epoch 9: Train mse = 0.6194 | Val mse = 0.6830
Epoch 10: Train mse = 0.5578 | Val mse = 0.6271
Epoch 11: Train mse = 0.4994 | Val mse = 0.5761
Epoch 12: Train mse = 0.4385 | Val mse = 0.5343
Epoch 13: Train mse = 0.3899 | Val mse = 0.4901
Epoch 14: Train mse = 0.3511 | Val mse = 0.4595
Epoch 15: Train mse = 0.3251 | Val mse = 0.4372
Epoch 16: Train mse = 0.3068 | Val mse = 0.4130
Epoch 17: Train mse = 0.2967 | Val mse = 0.4046
Epoch 18: Train mse = 0.2828 | Val mse = 0.3949
Epoch 19: Train mse = 0.2762 | Val mse = 0.3888
Epoch 20: Train mse = 0.2636 | Val mse = 0.3855
Epoch 21: Train mse = 0.2607 | Val mse = 0.3796
Epoch 22: Train mse = 0.2547 | Val mse = 0.3797
Epoch 23: Train mse = 0.2455 | Val mse = 0.3758
Epoch 24: Train mse = 0.2379 | Val mse = 0.3762
Epoch 25: Train mse = 0.2322 | Val mse = 0.3752
Epoch 26: Train mse = 0.2265 | Val mse = 0.3746
Epoch 27: Train mse = 0.2209 | Val mse = 0.3751
Epoch 28: Train mse = 0.2167 | Val mse = 0.3746
Epoch 29: Train mse = 0.2144 | Val mse = 0.3774
Epoch 30: Train mse = 0.2094 | Val mse = 0.3800
Epoch 31: Train mse = 0.2041 | Val mse = 0.3766
Epoch 32: Train mse = 0.2017 | Val mse = 0.3842
Epoch 33: Train mse = 0.1959 | Val mse = 0.3857
Epoch 34: Train mse = 0.1937 | Val mse = 0.3872
Epoch 35: Train mse = 0.1909 | Val mse = 0.3878
Epoch 36: Train mse = 0.1877 | Val mse = 0.3900
Early stopping triggered at epoch 36.
Using device: mps
Epoch 1: Train mse = 0.8868 | Val mse = 0.8391
Epoch 2: Train mse = 0.6267 | Val mse = 0.5518
Epoch 3: Train mse = 0.3814 | Val mse = 0.4114
Epoch 4: Train mse = 0.3181 | Val mse = 0.3646
Epoch 5: Train mse = 0.2840 | Val mse = 0.3562
Epoch 6: Train mse = 0.2603 | Val mse = 0.3670
Epoch 7: Train mse = 0.2429 | Val mse = 0.3506
Epoch 8: Train mse = 0.2292 | Val mse = 0.3480
Epoch 9: Train mse = 0.2210 | Val mse = 0.3593
Epoch 10: Train mse = 0.2130 | Val mse = 0.3521
Epoch 11: Train mse = 0.2053 | Val mse = 0.3639
Epoch 12: Train mse = 0.2015 | Val mse = 0.3662
Epoch 13: Train mse = 0.1969 | Val mse = 0.3581
Epoch 14: Train mse = 0.1938 | Val mse = 0.3647
Epoch 15: Train mse = 0.1875 | Val mse = 0.3648
Epoch 16: Train mse = 0.1804 | Val mse = 0.3731
Epoch 17: Train mse = 0.1780 | Val mse = 0.3826
Epoch 18: Train mse = 0.1739 | Val mse = 0.3787
Epoch 19: Train mse = 0.1711 | Val mse = 0.3823
Epoch 20: Train mse = 0.1688 | Val mse = 0.4020
Epoch 21: Train mse = 0.1740 | Val mse = 0.3811
Epoch 22: Train mse = 0.1715 | Val mse = 0.3820
Epoch 23: Train mse = 0.1697 | Val mse = 0.3975
Epoch 24: Train mse = 0.1716 | Val mse = 0.3882
Epoch 25: Train mse = 0.1702 | Val mse = 0.3864
Epoch 26: Train mse = 0.1689 | Val mse = 0.3860
Epoch 27: Train mse = 0.1625 | Val mse = 0.3906
Epoch 28: Train mse = 0.1584 | Val mse = 0.3880
Epoch 29: Train mse = 0.1583 | Val mse = 0.3899
Epoch 30: Train mse = 0.1530 | Val mse = 0.4033
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8869 | Val mse = 0.8434
Epoch 2: Train mse = 0.6325 | Val mse = 0.5579
Epoch 3: Train mse = 0.3821 | Val mse = 0.4066
Epoch 4: Train mse = 0.3182 | Val mse = 0.3795
Epoch 5: Train mse = 0.2857 | Val mse = 0.3632
Epoch 6: Train mse = 0.2597 | Val mse = 0.3845
Epoch 7: Train mse = 0.2469 | Val mse = 0.3601
Epoch 8: Train mse = 0.2332 | Val mse = 0.3594
Epoch 9: Train mse = 0.2220 | Val mse = 0.3668
Epoch 10: Train mse = 0.2121 | Val mse = 0.3701
Epoch 11: Train mse = 0.2060 | Val mse = 0.3760
Epoch 12: Train mse = 0.2054 | Val mse = 0.3699
Epoch 13: Train mse = 0.1971 | Val mse = 0.3644
Epoch 14: Train mse = 0.1883 | Val mse = 0.3754
Epoch 15: Train mse = 0.1878 | Val mse = 0.3848
Epoch 16: Train mse = 0.1859 | Val mse = 0.3874
Epoch 17: Train mse = 0.1812 | Val mse = 0.3851
Epoch 18: Train mse = 0.1775 | Val mse = 0.3789
Epoch 19: Train mse = 0.1719 | Val mse = 0.3876
Epoch 20: Train mse = 0.1639 | Val mse = 0.3965
Epoch 21: Train mse = 0.1653 | Val mse = 0.3934
Epoch 22: Train mse = 0.1654 | Val mse = 0.3953
Epoch 23: Train mse = 0.1650 | Val mse = 0.3978
Epoch 24: Train mse = 0.1653 | Val mse = 0.4043
Epoch 25: Train mse = 0.1619 | Val mse = 0.4011
Epoch 26: Train mse = 0.1657 | Val mse = 0.4161
Epoch 27: Train mse = 0.1676 | Val mse = 0.4009
Epoch 28: Train mse = 0.1621 | Val mse = 0.4112
Epoch 29: Train mse = 0.1574 | Val mse = 0.3991
Epoch 30: Train mse = 0.1487 | Val mse = 0.3989
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9946 | Val mse = 1.2229
Epoch 2: Train mse = 0.9788 | Val mse = 1.1110
Epoch 3: Train mse = 0.9143 | Val mse = 0.9544
Epoch 4: Train mse = 0.8862 | Val mse = 0.9475
Epoch 5: Train mse = 0.8676 | Val mse = 0.9267
Epoch 6: Train mse = 0.8559 | Val mse = 0.9193
Epoch 7: Train mse = 0.8518 | Val mse = 0.9156
Epoch 8: Train mse = 0.8439 | Val mse = 0.9110
Epoch 9: Train mse = 0.8359 | Val mse = 0.8992
Epoch 10: Train mse = 0.8079 | Val mse = 0.8624
Epoch 11: Train mse = 0.7591 | Val mse = 0.8061
Epoch 12: Train mse = 0.7116 | Val mse = 0.7933
Epoch 13: Train mse = 0.6747 | Val mse = 0.7842
Epoch 14: Train mse = 0.6373 | Val mse = 0.7630
Epoch 15: Train mse = 0.6001 | Val mse = 0.7420
Epoch 16: Train mse = 0.5624 | Val mse = 0.7097
Epoch 17: Train mse = 0.5199 | Val mse = 0.6787
Epoch 18: Train mse = 0.4804 | Val mse = 0.6446
Epoch 19: Train mse = 0.4422 | Val mse = 0.6181
Epoch 20: Train mse = 0.4130 | Val mse = 0.5934
Epoch 21: Train mse = 0.3852 | Val mse = 0.5718
Epoch 22: Train mse = 0.3643 | Val mse = 0.5633
Epoch 23: Train mse = 0.3454 | Val mse = 0.5471
Epoch 24: Train mse = 0.3354 | Val mse = 0.5427
Epoch 25: Train mse = 0.3197 | Val mse = 0.5316
Epoch 26: Train mse = 0.3122 | Val mse = 0.5213
Epoch 27: Train mse = 0.3035 | Val mse = 0.5167
Epoch 28: Train mse = 0.2951 | Val mse = 0.5050
Epoch 29: Train mse = 0.2912 | Val mse = 0.4906
Epoch 30: Train mse = 0.2851 | Val mse = 0.4939
Epoch 31: Train mse = 0.2741 | Val mse = 0.4768
Epoch 32: Train mse = 0.2680 | Val mse = 0.4793
Epoch 33: Train mse = 0.2621 | Val mse = 0.4697
Epoch 34: Train mse = 0.2578 | Val mse = 0.4758
Epoch 35: Train mse = 0.2569 | Val mse = 0.4711
Epoch 36: Train mse = 0.2512 | Val mse = 0.4614
Epoch 37: Train mse = 0.2491 | Val mse = 0.4678
Epoch 38: Train mse = 0.2432 | Val mse = 0.4623
Epoch 39: Train mse = 0.2407 | Val mse = 0.4604
Epoch 40: Train mse = 0.2380 | Val mse = 0.4610
Epoch 41: Train mse = 0.2371 | Val mse = 0.4679
Epoch 42: Train mse = 0.2334 | Val mse = 0.4603
Epoch 43: Train mse = 0.2288 | Val mse = 0.4591
Epoch 44: Train mse = 0.2280 | Val mse = 0.4613
Epoch 45: Train mse = 0.2249 | Val mse = 0.4663
Epoch 46: Train mse = 0.2209 | Val mse = 0.4673
Epoch 47: Train mse = 0.2190 | Val mse = 0.4649
Epoch 48: Train mse = 0.2186 | Val mse = 0.4729
Epoch 49: Train mse = 0.2163 | Val mse = 0.4662
Epoch 50: Train mse = 0.2132 | Val mse = 0.4706
Using device: mps
Epoch 1: Train mse = 0.9947 | Val mse = 1.2233
Epoch 2: Train mse = 0.9801 | Val mse = 1.1163
Epoch 3: Train mse = 0.9155 | Val mse = 0.9531
Epoch 4: Train mse = 0.8864 | Val mse = 0.9480
Epoch 5: Train mse = 0.8687 | Val mse = 0.9279
Epoch 6: Train mse = 0.8571 | Val mse = 0.9210
Epoch 7: Train mse = 0.8533 | Val mse = 0.9168
Epoch 8: Train mse = 0.8466 | Val mse = 0.9130
Epoch 9: Train mse = 0.8420 | Val mse = 0.9039
Epoch 10: Train mse = 0.8230 | Val mse = 0.8798
Epoch 11: Train mse = 0.7885 | Val mse = 0.8321
Epoch 12: Train mse = 0.7334 | Val mse = 0.7933
Epoch 13: Train mse = 0.6938 | Val mse = 0.7843
Epoch 14: Train mse = 0.6560 | Val mse = 0.7632
Epoch 15: Train mse = 0.6171 | Val mse = 0.7402
Epoch 16: Train mse = 0.5773 | Val mse = 0.7009
Epoch 17: Train mse = 0.5327 | Val mse = 0.6658
Epoch 18: Train mse = 0.4938 | Val mse = 0.6304
Epoch 19: Train mse = 0.4558 | Val mse = 0.6010
Epoch 20: Train mse = 0.4292 | Val mse = 0.5791
Epoch 21: Train mse = 0.3999 | Val mse = 0.5590
Epoch 22: Train mse = 0.3739 | Val mse = 0.5467
Epoch 23: Train mse = 0.3507 | Val mse = 0.5260
Epoch 24: Train mse = 0.3388 | Val mse = 0.5156
Epoch 25: Train mse = 0.3229 | Val mse = 0.4958
Epoch 26: Train mse = 0.3135 | Val mse = 0.4811
Epoch 27: Train mse = 0.3040 | Val mse = 0.4746
Epoch 28: Train mse = 0.2952 | Val mse = 0.4566
Epoch 29: Train mse = 0.2911 | Val mse = 0.4477
Epoch 30: Train mse = 0.2843 | Val mse = 0.4434
Epoch 31: Train mse = 0.2752 | Val mse = 0.4360
Epoch 32: Train mse = 0.2694 | Val mse = 0.4226
Epoch 33: Train mse = 0.2637 | Val mse = 0.4225
Epoch 34: Train mse = 0.2599 | Val mse = 0.4200
Epoch 35: Train mse = 0.2587 | Val mse = 0.4103
Epoch 36: Train mse = 0.2536 | Val mse = 0.4091
Epoch 37: Train mse = 0.2515 | Val mse = 0.4065
Epoch 38: Train mse = 0.2461 | Val mse = 0.4025
Epoch 39: Train mse = 0.2444 | Val mse = 0.3979
Epoch 40: Train mse = 0.2413 | Val mse = 0.3981
Epoch 41: Train mse = 0.2404 | Val mse = 0.3958
Epoch 42: Train mse = 0.2362 | Val mse = 0.3963
Epoch 43: Train mse = 0.2332 | Val mse = 0.3953
Epoch 44: Train mse = 0.2321 | Val mse = 0.3991
Epoch 45: Train mse = 0.2291 | Val mse = 0.3943
Epoch 46: Train mse = 0.2263 | Val mse = 0.3992
Epoch 47: Train mse = 0.2238 | Val mse = 0.3946
Epoch 48: Train mse = 0.2231 | Val mse = 0.4007
Epoch 49: Train mse = 0.2221 | Val mse = 0.3988
Epoch 50: Train mse = 0.2192 | Val mse = 0.3980
Using device: mps
Epoch 1: Train mse = 0.9218 | Val mse = 0.9658
Epoch 2: Train mse = 0.8047 | Val mse = 0.7846
Epoch 3: Train mse = 0.6772 | Val mse = 0.6857
Epoch 4: Train mse = 0.5140 | Val mse = 0.5154
Epoch 5: Train mse = 0.3839 | Val mse = 0.4387
Epoch 6: Train mse = 0.3369 | Val mse = 0.3856
Epoch 7: Train mse = 0.3033 | Val mse = 0.3750
Epoch 8: Train mse = 0.2886 | Val mse = 0.3416
Epoch 9: Train mse = 0.2712 | Val mse = 0.3525
Epoch 10: Train mse = 0.2657 | Val mse = 0.3346
Epoch 11: Train mse = 0.2556 | Val mse = 0.3333
Epoch 12: Train mse = 0.2455 | Val mse = 0.3334
Epoch 13: Train mse = 0.2396 | Val mse = 0.3411
Epoch 14: Train mse = 0.2451 | Val mse = 0.3644
Epoch 15: Train mse = 0.2392 | Val mse = 0.3408
Epoch 16: Train mse = 0.2260 | Val mse = 0.3421
Epoch 17: Train mse = 0.2163 | Val mse = 0.3433
Epoch 18: Train mse = 0.2094 | Val mse = 0.3461
Epoch 19: Train mse = 0.2047 | Val mse = 0.3628
Epoch 20: Train mse = 0.2015 | Val mse = 0.3581
Epoch 21: Train mse = 0.1969 | Val mse = 0.3618
Epoch 22: Train mse = 0.1923 | Val mse = 0.3625
Epoch 23: Train mse = 0.1900 | Val mse = 0.3674
Epoch 24: Train mse = 0.1914 | Val mse = 0.3679
Epoch 25: Train mse = 0.1910 | Val mse = 0.3539
Epoch 26: Train mse = 0.1935 | Val mse = 0.3591
Epoch 27: Train mse = 0.1910 | Val mse = 0.3535
Epoch 28: Train mse = 0.1966 | Val mse = 0.3834
Epoch 29: Train mse = 0.1985 | Val mse = 0.3738
Epoch 30: Train mse = 0.1946 | Val mse = 0.3909
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9226 | Val mse = 0.9688
Epoch 2: Train mse = 0.8126 | Val mse = 0.7823
Epoch 3: Train mse = 0.6883 | Val mse = 0.6967
Epoch 4: Train mse = 0.5311 | Val mse = 0.5214
Epoch 5: Train mse = 0.3951 | Val mse = 0.4270
Epoch 6: Train mse = 0.3351 | Val mse = 0.3984
Epoch 7: Train mse = 0.3125 | Val mse = 0.3603
Epoch 8: Train mse = 0.2943 | Val mse = 0.3501
Epoch 9: Train mse = 0.2833 | Val mse = 0.3512
Epoch 10: Train mse = 0.2708 | Val mse = 0.3319
Epoch 11: Train mse = 0.2573 | Val mse = 0.3258
Epoch 12: Train mse = 0.2460 | Val mse = 0.3318
Epoch 13: Train mse = 0.2382 | Val mse = 0.3312
Epoch 14: Train mse = 0.2343 | Val mse = 0.3462
Epoch 15: Train mse = 0.2328 | Val mse = 0.3381
Epoch 16: Train mse = 0.2279 | Val mse = 0.3486
Epoch 17: Train mse = 0.2216 | Val mse = 0.3360
Epoch 18: Train mse = 0.2130 | Val mse = 0.3441
Epoch 19: Train mse = 0.2070 | Val mse = 0.3434
Epoch 20: Train mse = 0.2025 | Val mse = 0.3332
Epoch 21: Train mse = 0.1979 | Val mse = 0.3496
Epoch 22: Train mse = 0.1942 | Val mse = 0.3456
Epoch 23: Train mse = 0.1927 | Val mse = 0.3444
Epoch 24: Train mse = 0.1905 | Val mse = 0.3477
Epoch 25: Train mse = 0.1870 | Val mse = 0.3634
Epoch 26: Train mse = 0.1887 | Val mse = 0.3421
Epoch 27: Train mse = 0.1812 | Val mse = 0.3489
Epoch 28: Train mse = 0.1815 | Val mse = 0.3688
Epoch 29: Train mse = 0.1838 | Val mse = 0.3549
Epoch 30: Train mse = 0.1847 | Val mse = 0.3608
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 1.0039 | Val mse = 1.2176
Epoch 2: Train mse = 0.9787 | Val mse = 1.1156
Epoch 3: Train mse = 0.9211 | Val mse = 0.9675
Epoch 4: Train mse = 0.8839 | Val mse = 0.9424
Epoch 5: Train mse = 0.8571 | Val mse = 0.9078
Epoch 6: Train mse = 0.8202 | Val mse = 0.8571
Epoch 7: Train mse = 0.7665 | Val mse = 0.8232
Epoch 8: Train mse = 0.7340 | Val mse = 0.8066
Epoch 9: Train mse = 0.7018 | Val mse = 0.7928
Epoch 10: Train mse = 0.6743 | Val mse = 0.7769
Epoch 11: Train mse = 0.6398 | Val mse = 0.7636
Epoch 12: Train mse = 0.5977 | Val mse = 0.7439
Epoch 13: Train mse = 0.5482 | Val mse = 0.7178
Epoch 14: Train mse = 0.4967 | Val mse = 0.6933
Epoch 15: Train mse = 0.4460 | Val mse = 0.6715
Epoch 16: Train mse = 0.4041 | Val mse = 0.6458
Epoch 17: Train mse = 0.3721 | Val mse = 0.6269
Epoch 18: Train mse = 0.3458 | Val mse = 0.6137
Epoch 19: Train mse = 0.3209 | Val mse = 0.6023
Epoch 20: Train mse = 0.3035 | Val mse = 0.6070
Epoch 21: Train mse = 0.2913 | Val mse = 0.5960
Epoch 22: Train mse = 0.2799 | Val mse = 0.5900
Epoch 23: Train mse = 0.2728 | Val mse = 0.5822
Epoch 24: Train mse = 0.2639 | Val mse = 0.5814
Epoch 25: Train mse = 0.2565 | Val mse = 0.5856
Epoch 26: Train mse = 0.2530 | Val mse = 0.5904
Epoch 27: Train mse = 0.2469 | Val mse = 0.6023
Epoch 28: Train mse = 0.2421 | Val mse = 0.5990
Epoch 29: Train mse = 0.2361 | Val mse = 0.5992
Epoch 30: Train mse = 0.2335 | Val mse = 0.6002
Epoch 31: Train mse = 0.2270 | Val mse = 0.5951
Epoch 32: Train mse = 0.2253 | Val mse = 0.5999
Epoch 33: Train mse = 0.2212 | Val mse = 0.5974
Epoch 34: Train mse = 0.2191 | Val mse = 0.6040
Early stopping triggered at epoch 34.
Using device: mps
Epoch 1: Train mse = 1.0040 | Val mse = 1.2184
Epoch 2: Train mse = 0.9801 | Val mse = 1.1185
Epoch 3: Train mse = 0.9209 | Val mse = 0.9651
Epoch 4: Train mse = 0.8835 | Val mse = 0.9420
Epoch 5: Train mse = 0.8594 | Val mse = 0.9092
Epoch 6: Train mse = 0.8272 | Val mse = 0.8605
Epoch 7: Train mse = 0.7714 | Val mse = 0.8228
Epoch 8: Train mse = 0.7365 | Val mse = 0.8042
Epoch 9: Train mse = 0.7037 | Val mse = 0.7919
Epoch 10: Train mse = 0.6763 | Val mse = 0.7774
Epoch 11: Train mse = 0.6412 | Val mse = 0.7649
Epoch 12: Train mse = 0.5970 | Val mse = 0.7494
Epoch 13: Train mse = 0.5445 | Val mse = 0.7218
Epoch 14: Train mse = 0.4942 | Val mse = 0.6962
Epoch 15: Train mse = 0.4472 | Val mse = 0.6723
Epoch 16: Train mse = 0.4077 | Val mse = 0.6474
Epoch 17: Train mse = 0.3798 | Val mse = 0.6249
Epoch 18: Train mse = 0.3535 | Val mse = 0.6112
Epoch 19: Train mse = 0.3290 | Val mse = 0.6058
Epoch 20: Train mse = 0.3112 | Val mse = 0.5850
Epoch 21: Train mse = 0.3001 | Val mse = 0.5837
Epoch 22: Train mse = 0.2900 | Val mse = 0.5775
Epoch 23: Train mse = 0.2837 | Val mse = 0.5830
Epoch 24: Train mse = 0.2744 | Val mse = 0.5690
Epoch 25: Train mse = 0.2664 | Val mse = 0.5667
Epoch 26: Train mse = 0.2621 | Val mse = 0.5684
Epoch 27: Train mse = 0.2551 | Val mse = 0.5667
Epoch 28: Train mse = 0.2474 | Val mse = 0.5652
Epoch 29: Train mse = 0.2415 | Val mse = 0.5679
Epoch 30: Train mse = 0.2378 | Val mse = 0.5683
Epoch 31: Train mse = 0.2317 | Val mse = 0.5747
Epoch 32: Train mse = 0.2288 | Val mse = 0.5729
Epoch 33: Train mse = 0.2244 | Val mse = 0.5750
Epoch 34: Train mse = 0.2227 | Val mse = 0.5777
Epoch 35: Train mse = 0.2202 | Val mse = 0.5668
Epoch 36: Train mse = 0.2165 | Val mse = 0.5822
Epoch 37: Train mse = 0.2132 | Val mse = 0.5745
Epoch 38: Train mse = 0.2125 | Val mse = 0.5766
Early stopping triggered at epoch 38.
Using device: mps
Epoch 1: Train mse = 0.9193 | Val mse = 0.8601
Epoch 2: Train mse = 0.7388 | Val mse = 0.7846
Epoch 3: Train mse = 0.5725 | Val mse = 0.6329
Epoch 4: Train mse = 0.4108 | Val mse = 0.4839
Epoch 5: Train mse = 0.3518 | Val mse = 0.4668
Epoch 6: Train mse = 0.3258 | Val mse = 0.4313
Epoch 7: Train mse = 0.3078 | Val mse = 0.3696
Epoch 8: Train mse = 0.2934 | Val mse = 0.3626
Epoch 9: Train mse = 0.2770 | Val mse = 0.3788
Epoch 10: Train mse = 0.2652 | Val mse = 0.3533
Epoch 11: Train mse = 0.2523 | Val mse = 0.3503
Epoch 12: Train mse = 0.2427 | Val mse = 0.3603
Epoch 13: Train mse = 0.2331 | Val mse = 0.3475
Epoch 14: Train mse = 0.2247 | Val mse = 0.3657
Epoch 15: Train mse = 0.2229 | Val mse = 0.3492
Epoch 16: Train mse = 0.2199 | Val mse = 0.3629
Epoch 17: Train mse = 0.2211 | Val mse = 0.3554
Epoch 18: Train mse = 0.2227 | Val mse = 0.3726
Epoch 19: Train mse = 0.2293 | Val mse = 0.3661
Epoch 20: Train mse = 0.2272 | Val mse = 0.3582
Epoch 21: Train mse = 0.2217 | Val mse = 0.3598
Epoch 22: Train mse = 0.2142 | Val mse = 0.3407
Epoch 23: Train mse = 0.2069 | Val mse = 0.3483
Epoch 24: Train mse = 0.1988 | Val mse = 0.3500
Epoch 25: Train mse = 0.1954 | Val mse = 0.3493
Epoch 26: Train mse = 0.1892 | Val mse = 0.3475
Epoch 27: Train mse = 0.1838 | Val mse = 0.3617
Epoch 28: Train mse = 0.1824 | Val mse = 0.3374
Epoch 29: Train mse = 0.1801 | Val mse = 0.3549
Epoch 30: Train mse = 0.1806 | Val mse = 0.3611
Epoch 31: Train mse = 0.1782 | Val mse = 0.3585
Epoch 32: Train mse = 0.1810 | Val mse = 0.3650
Epoch 33: Train mse = 0.1837 | Val mse = 0.3613
Epoch 34: Train mse = 0.1847 | Val mse = 0.3749
Epoch 35: Train mse = 0.1854 | Val mse = 0.3722
Epoch 36: Train mse = 0.1858 | Val mse = 0.3718
Epoch 37: Train mse = 0.1881 | Val mse = 0.3751
Epoch 38: Train mse = 0.1898 | Val mse = 0.3706
Early stopping triggered at epoch 38.
Using device: mps
Epoch 1: Train mse = 0.9203 | Val mse = 0.8643
Epoch 2: Train mse = 0.7402 | Val mse = 0.7854
Epoch 3: Train mse = 0.5854 | Val mse = 0.6430
Epoch 4: Train mse = 0.4121 | Val mse = 0.4467
Epoch 5: Train mse = 0.3554 | Val mse = 0.4097
Epoch 6: Train mse = 0.3125 | Val mse = 0.3902
Epoch 7: Train mse = 0.2969 | Val mse = 0.3490
Epoch 8: Train mse = 0.2786 | Val mse = 0.3635
Epoch 9: Train mse = 0.2677 | Val mse = 0.3809
Epoch 10: Train mse = 0.2647 | Val mse = 0.3525
Epoch 11: Train mse = 0.2510 | Val mse = 0.3399
Epoch 12: Train mse = 0.2372 | Val mse = 0.3529
Epoch 13: Train mse = 0.2271 | Val mse = 0.3544
Epoch 14: Train mse = 0.2215 | Val mse = 0.3574
Epoch 15: Train mse = 0.2155 | Val mse = 0.3485
Epoch 16: Train mse = 0.2163 | Val mse = 0.3583
Epoch 17: Train mse = 0.2140 | Val mse = 0.3695
Epoch 18: Train mse = 0.2159 | Val mse = 0.3546
Epoch 19: Train mse = 0.2124 | Val mse = 0.3801
Epoch 20: Train mse = 0.2105 | Val mse = 0.3507
Epoch 21: Train mse = 0.2073 | Val mse = 0.3697
Epoch 22: Train mse = 0.2050 | Val mse = 0.3729
Epoch 23: Train mse = 0.2042 | Val mse = 0.3766
Epoch 24: Train mse = 0.2019 | Val mse = 0.3644
Epoch 25: Train mse = 0.1950 | Val mse = 0.3534
Epoch 26: Train mse = 0.1932 | Val mse = 0.3550
Epoch 27: Train mse = 0.1871 | Val mse = 0.3592
Epoch 28: Train mse = 0.1833 | Val mse = 0.3862
Epoch 29: Train mse = 0.1822 | Val mse = 0.3758
Epoch 30: Train mse = 0.1798 | Val mse = 0.3617
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9891 | Val mse = 1.1586
Epoch 2: Train mse = 0.9297 | Val mse = 0.9621
Epoch 3: Train mse = 0.8721 | Val mse = 0.8875
Epoch 4: Train mse = 0.7890 | Val mse = 0.7991
Epoch 5: Train mse = 0.7035 | Val mse = 0.7187
Epoch 6: Train mse = 0.6185 | Val mse = 0.6440
Epoch 7: Train mse = 0.5365 | Val mse = 0.5738
Epoch 8: Train mse = 0.4596 | Val mse = 0.5065
Epoch 9: Train mse = 0.4008 | Val mse = 0.4597
Epoch 10: Train mse = 0.3600 | Val mse = 0.4206
Epoch 11: Train mse = 0.3325 | Val mse = 0.4036
Epoch 12: Train mse = 0.3182 | Val mse = 0.3847
Epoch 13: Train mse = 0.3087 | Val mse = 0.3750
Epoch 14: Train mse = 0.2968 | Val mse = 0.3693
Epoch 15: Train mse = 0.2904 | Val mse = 0.3651
Epoch 16: Train mse = 0.2834 | Val mse = 0.3551
Epoch 17: Train mse = 0.2744 | Val mse = 0.3523
Epoch 18: Train mse = 0.2687 | Val mse = 0.3450
Epoch 19: Train mse = 0.2606 | Val mse = 0.3464
Epoch 20: Train mse = 0.2562 | Val mse = 0.3458
Epoch 21: Train mse = 0.2523 | Val mse = 0.3497
Epoch 22: Train mse = 0.2471 | Val mse = 0.3455
Epoch 23: Train mse = 0.2425 | Val mse = 0.3503
Epoch 24: Train mse = 0.2392 | Val mse = 0.3429
Epoch 25: Train mse = 0.2331 | Val mse = 0.3440
Epoch 26: Train mse = 0.2296 | Val mse = 0.3515
Epoch 27: Train mse = 0.2262 | Val mse = 0.3469
Epoch 28: Train mse = 0.2215 | Val mse = 0.3524
Epoch 29: Train mse = 0.2191 | Val mse = 0.3509
Epoch 30: Train mse = 0.2145 | Val mse = 0.3565
Epoch 31: Train mse = 0.2128 | Val mse = 0.3573
Epoch 32: Train mse = 0.2072 | Val mse = 0.3644
Epoch 33: Train mse = 0.2063 | Val mse = 0.3638
Epoch 34: Train mse = 0.2033 | Val mse = 0.3682
Early stopping triggered at epoch 34.
Using device: mps
Epoch 1: Train mse = 0.9895 | Val mse = 1.1604
Epoch 2: Train mse = 0.9310 | Val mse = 0.9653
Epoch 3: Train mse = 0.8752 | Val mse = 0.8917
Epoch 4: Train mse = 0.7960 | Val mse = 0.8020
Epoch 5: Train mse = 0.7110 | Val mse = 0.7217
Epoch 6: Train mse = 0.6279 | Val mse = 0.6489
Epoch 7: Train mse = 0.5475 | Val mse = 0.5795
Epoch 8: Train mse = 0.4715 | Val mse = 0.5115
Epoch 9: Train mse = 0.4111 | Val mse = 0.4625
Epoch 10: Train mse = 0.3679 | Val mse = 0.4207
Epoch 11: Train mse = 0.3380 | Val mse = 0.4023
Epoch 12: Train mse = 0.3224 | Val mse = 0.3824
Epoch 13: Train mse = 0.3131 | Val mse = 0.3728
Epoch 14: Train mse = 0.3011 | Val mse = 0.3666
Epoch 15: Train mse = 0.2947 | Val mse = 0.3619
Epoch 16: Train mse = 0.2882 | Val mse = 0.3517
Epoch 17: Train mse = 0.2793 | Val mse = 0.3494
Epoch 18: Train mse = 0.2739 | Val mse = 0.3413
Epoch 19: Train mse = 0.2661 | Val mse = 0.3423
Epoch 20: Train mse = 0.2621 | Val mse = 0.3419
Epoch 21: Train mse = 0.2585 | Val mse = 0.3457
Epoch 22: Train mse = 0.2535 | Val mse = 0.3419
Epoch 23: Train mse = 0.2492 | Val mse = 0.3467
Epoch 24: Train mse = 0.2463 | Val mse = 0.3388
Epoch 25: Train mse = 0.2405 | Val mse = 0.3389
Epoch 26: Train mse = 0.2374 | Val mse = 0.3475
Epoch 27: Train mse = 0.2346 | Val mse = 0.3424
Epoch 28: Train mse = 0.2302 | Val mse = 0.3480
Epoch 29: Train mse = 0.2278 | Val mse = 0.3448
Epoch 30: Train mse = 0.2234 | Val mse = 0.3489
Epoch 31: Train mse = 0.2220 | Val mse = 0.3504
Epoch 32: Train mse = 0.2167 | Val mse = 0.3590
Epoch 33: Train mse = 0.2162 | Val mse = 0.3569
Epoch 34: Train mse = 0.2127 | Val mse = 0.3593
Early stopping triggered at epoch 34.
Using device: mps
Epoch 1: Train mse = 0.8164 | Val mse = 0.6790
Epoch 2: Train mse = 0.4640 | Val mse = 0.4361
Epoch 3: Train mse = 0.3329 | Val mse = 0.3608
Epoch 4: Train mse = 0.2912 | Val mse = 0.3350
Epoch 5: Train mse = 0.2691 | Val mse = 0.3300
Epoch 6: Train mse = 0.2497 | Val mse = 0.3403
Epoch 7: Train mse = 0.2379 | Val mse = 0.3550
Epoch 8: Train mse = 0.2225 | Val mse = 0.3667
Epoch 9: Train mse = 0.2147 | Val mse = 0.3620
Epoch 10: Train mse = 0.2076 | Val mse = 0.3788
Epoch 11: Train mse = 0.1950 | Val mse = 0.3731
Epoch 12: Train mse = 0.1892 | Val mse = 0.3769
Epoch 13: Train mse = 0.1818 | Val mse = 0.3749
Epoch 14: Train mse = 0.1792 | Val mse = 0.3705
Epoch 15: Train mse = 0.1746 | Val mse = 0.3811
Epoch 16: Train mse = 0.1659 | Val mse = 0.3784
Epoch 17: Train mse = 0.1583 | Val mse = 0.3941
Epoch 18: Train mse = 0.1526 | Val mse = 0.3998
Epoch 19: Train mse = 0.1513 | Val mse = 0.4020
Epoch 20: Train mse = 0.1469 | Val mse = 0.4061
Epoch 21: Train mse = 0.1470 | Val mse = 0.3970
Epoch 22: Train mse = 0.1450 | Val mse = 0.3957
Epoch 23: Train mse = 0.1470 | Val mse = 0.4242
Epoch 24: Train mse = 0.1441 | Val mse = 0.4140
Epoch 25: Train mse = 0.1372 | Val mse = 0.4029
Epoch 26: Train mse = 0.1308 | Val mse = 0.4139
Epoch 27: Train mse = 0.1233 | Val mse = 0.4265
Epoch 28: Train mse = 0.1167 | Val mse = 0.4139
Epoch 29: Train mse = 0.1127 | Val mse = 0.4228
Epoch 30: Train mse = 0.1089 | Val mse = 0.4339
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8213 | Val mse = 0.6868
Epoch 2: Train mse = 0.4743 | Val mse = 0.4358
Epoch 3: Train mse = 0.3356 | Val mse = 0.3552
Epoch 4: Train mse = 0.2933 | Val mse = 0.3320
Epoch 5: Train mse = 0.2702 | Val mse = 0.3263
Epoch 6: Train mse = 0.2524 | Val mse = 0.3317
Epoch 7: Train mse = 0.2412 | Val mse = 0.3527
Epoch 8: Train mse = 0.2272 | Val mse = 0.3645
Epoch 9: Train mse = 0.2201 | Val mse = 0.3624
Epoch 10: Train mse = 0.2111 | Val mse = 0.3679
Epoch 11: Train mse = 0.2004 | Val mse = 0.3737
Epoch 12: Train mse = 0.1926 | Val mse = 0.3669
Epoch 13: Train mse = 0.1855 | Val mse = 0.3730
Epoch 14: Train mse = 0.1835 | Val mse = 0.3762
Epoch 15: Train mse = 0.1773 | Val mse = 0.3705
Epoch 16: Train mse = 0.1701 | Val mse = 0.3800
Epoch 17: Train mse = 0.1688 | Val mse = 0.3902
Epoch 18: Train mse = 0.1640 | Val mse = 0.3918
Epoch 19: Train mse = 0.1560 | Val mse = 0.3942
Epoch 20: Train mse = 0.1504 | Val mse = 0.3987
Epoch 21: Train mse = 0.1447 | Val mse = 0.4046
Epoch 22: Train mse = 0.1431 | Val mse = 0.4060
Epoch 23: Train mse = 0.1390 | Val mse = 0.4067
Epoch 24: Train mse = 0.1349 | Val mse = 0.4090
Epoch 25: Train mse = 0.1280 | Val mse = 0.4147
Epoch 26: Train mse = 0.1269 | Val mse = 0.4081
Epoch 27: Train mse = 0.1241 | Val mse = 0.4127
Epoch 28: Train mse = 0.1195 | Val mse = 0.4240
Epoch 29: Train mse = 0.1193 | Val mse = 0.4132
Epoch 30: Train mse = 0.1152 | Val mse = 0.4216
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9851 | Val mse = 1.1486
Epoch 2: Train mse = 0.9190 | Val mse = 0.9320
Epoch 3: Train mse = 0.8589 | Val mse = 0.8801
Epoch 4: Train mse = 0.7795 | Val mse = 0.7869
Epoch 5: Train mse = 0.6830 | Val mse = 0.7042
Epoch 6: Train mse = 0.5855 | Val mse = 0.6328
Epoch 7: Train mse = 0.4988 | Val mse = 0.5689
Epoch 8: Train mse = 0.4225 | Val mse = 0.5105
Epoch 9: Train mse = 0.3640 | Val mse = 0.4568
Epoch 10: Train mse = 0.3266 | Val mse = 0.4309
Epoch 11: Train mse = 0.3093 | Val mse = 0.4144
Epoch 12: Train mse = 0.2910 | Val mse = 0.3993
Epoch 13: Train mse = 0.2771 | Val mse = 0.3872
Epoch 14: Train mse = 0.2632 | Val mse = 0.3818
Epoch 15: Train mse = 0.2545 | Val mse = 0.3796
Epoch 16: Train mse = 0.2427 | Val mse = 0.3710
Epoch 17: Train mse = 0.2325 | Val mse = 0.3751
Epoch 18: Train mse = 0.2259 | Val mse = 0.3733
Epoch 19: Train mse = 0.2156 | Val mse = 0.3713
Epoch 20: Train mse = 0.2074 | Val mse = 0.3745
Epoch 21: Train mse = 0.2017 | Val mse = 0.3745
Epoch 22: Train mse = 0.1955 | Val mse = 0.3778
Epoch 23: Train mse = 0.1899 | Val mse = 0.3793
Epoch 24: Train mse = 0.1855 | Val mse = 0.3771
Epoch 25: Train mse = 0.1826 | Val mse = 0.3810
Epoch 26: Train mse = 0.1791 | Val mse = 0.3798
Epoch 27: Train mse = 0.1740 | Val mse = 0.3831
Epoch 28: Train mse = 0.1714 | Val mse = 0.3848
Epoch 29: Train mse = 0.1674 | Val mse = 0.3868
Epoch 30: Train mse = 0.1644 | Val mse = 0.3910
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9856 | Val mse = 1.1504
Epoch 2: Train mse = 0.9197 | Val mse = 0.9327
Epoch 3: Train mse = 0.8602 | Val mse = 0.8806
Epoch 4: Train mse = 0.7811 | Val mse = 0.7865
Epoch 5: Train mse = 0.6858 | Val mse = 0.7037
Epoch 6: Train mse = 0.5876 | Val mse = 0.6309
Epoch 7: Train mse = 0.5022 | Val mse = 0.5668
Epoch 8: Train mse = 0.4261 | Val mse = 0.5060
Epoch 9: Train mse = 0.3671 | Val mse = 0.4486
Epoch 10: Train mse = 0.3296 | Val mse = 0.4239
Epoch 11: Train mse = 0.3131 | Val mse = 0.4084
Epoch 12: Train mse = 0.2956 | Val mse = 0.3938
Epoch 13: Train mse = 0.2827 | Val mse = 0.3813
Epoch 14: Train mse = 0.2694 | Val mse = 0.3763
Epoch 15: Train mse = 0.2616 | Val mse = 0.3729
Epoch 16: Train mse = 0.2506 | Val mse = 0.3641
Epoch 17: Train mse = 0.2407 | Val mse = 0.3660
Epoch 18: Train mse = 0.2346 | Val mse = 0.3636
Epoch 19: Train mse = 0.2245 | Val mse = 0.3623
Epoch 20: Train mse = 0.2161 | Val mse = 0.3660
Epoch 21: Train mse = 0.2104 | Val mse = 0.3643
Epoch 22: Train mse = 0.2041 | Val mse = 0.3709
Epoch 23: Train mse = 0.1986 | Val mse = 0.3714
Epoch 24: Train mse = 0.1936 | Val mse = 0.3708
Epoch 25: Train mse = 0.1906 | Val mse = 0.3727
Epoch 26: Train mse = 0.1870 | Val mse = 0.3752
Epoch 27: Train mse = 0.1815 | Val mse = 0.3792
Epoch 28: Train mse = 0.1788 | Val mse = 0.3820
Epoch 29: Train mse = 0.1747 | Val mse = 0.3843
Epoch 30: Train mse = 0.1715 | Val mse = 0.3878
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8049 | Val mse = 0.6466
Epoch 2: Train mse = 0.4288 | Val mse = 0.4093
Epoch 3: Train mse = 0.3206 | Val mse = 0.3712
Epoch 4: Train mse = 0.2815 | Val mse = 0.3548
Epoch 5: Train mse = 0.2494 | Val mse = 0.3454
Epoch 6: Train mse = 0.2320 | Val mse = 0.3591
Epoch 7: Train mse = 0.2162 | Val mse = 0.3581
Epoch 8: Train mse = 0.2051 | Val mse = 0.3524
Epoch 9: Train mse = 0.1948 | Val mse = 0.3624
Epoch 10: Train mse = 0.1863 | Val mse = 0.3598
Epoch 11: Train mse = 0.1809 | Val mse = 0.3589
Epoch 12: Train mse = 0.1737 | Val mse = 0.3660
Epoch 13: Train mse = 0.1722 | Val mse = 0.3753
Epoch 14: Train mse = 0.1707 | Val mse = 0.3758
Epoch 15: Train mse = 0.1685 | Val mse = 0.3726
Epoch 16: Train mse = 0.1658 | Val mse = 0.3753
Epoch 17: Train mse = 0.1603 | Val mse = 0.3816
Epoch 18: Train mse = 0.1572 | Val mse = 0.3861
Epoch 19: Train mse = 0.1552 | Val mse = 0.3881
Epoch 20: Train mse = 0.1512 | Val mse = 0.3838
Epoch 21: Train mse = 0.1505 | Val mse = 0.3932
Epoch 22: Train mse = 0.1464 | Val mse = 0.3970
Epoch 23: Train mse = 0.1434 | Val mse = 0.3945
Epoch 24: Train mse = 0.1378 | Val mse = 0.3973
Epoch 25: Train mse = 0.1354 | Val mse = 0.4028
Epoch 26: Train mse = 0.1316 | Val mse = 0.3924
Epoch 27: Train mse = 0.1254 | Val mse = 0.4011
Epoch 28: Train mse = 0.1216 | Val mse = 0.4163
Epoch 29: Train mse = 0.1174 | Val mse = 0.4034
Epoch 30: Train mse = 0.1123 | Val mse = 0.4214
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8063 | Val mse = 0.6536
Epoch 2: Train mse = 0.4317 | Val mse = 0.4050
Epoch 3: Train mse = 0.3199 | Val mse = 0.3733
Epoch 4: Train mse = 0.2813 | Val mse = 0.3474
Epoch 5: Train mse = 0.2508 | Val mse = 0.3456
Epoch 6: Train mse = 0.2341 | Val mse = 0.3536
Epoch 7: Train mse = 0.2217 | Val mse = 0.3558
Epoch 8: Train mse = 0.2067 | Val mse = 0.3585
Epoch 9: Train mse = 0.1944 | Val mse = 0.3626
Epoch 10: Train mse = 0.1820 | Val mse = 0.3549
Epoch 11: Train mse = 0.1736 | Val mse = 0.3603
Epoch 12: Train mse = 0.1655 | Val mse = 0.3615
Epoch 13: Train mse = 0.1624 | Val mse = 0.3725
Epoch 14: Train mse = 0.1601 | Val mse = 0.3775
Epoch 15: Train mse = 0.1584 | Val mse = 0.3708
Epoch 16: Train mse = 0.1565 | Val mse = 0.3677
Epoch 17: Train mse = 0.1583 | Val mse = 0.3830
Epoch 18: Train mse = 0.1555 | Val mse = 0.3733
Epoch 19: Train mse = 0.1527 | Val mse = 0.3838
Epoch 20: Train mse = 0.1481 | Val mse = 0.3866
Epoch 21: Train mse = 0.1490 | Val mse = 0.3836
Epoch 22: Train mse = 0.1409 | Val mse = 0.3980
Epoch 23: Train mse = 0.1364 | Val mse = 0.4032
Epoch 24: Train mse = 0.1347 | Val mse = 0.4027
Epoch 25: Train mse = 0.1297 | Val mse = 0.3957
Epoch 26: Train mse = 0.1248 | Val mse = 0.4041
Epoch 27: Train mse = 0.1204 | Val mse = 0.4165
Epoch 28: Train mse = 0.1196 | Val mse = 0.4088
Epoch 29: Train mse = 0.1143 | Val mse = 0.4061
Epoch 30: Train mse = 0.1131 | Val mse = 0.3958
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9920 | Val mse = 1.1842
Epoch 2: Train mse = 0.9306 | Val mse = 0.9563
Epoch 3: Train mse = 0.8821 | Val mse = 0.9477
Epoch 4: Train mse = 0.8628 | Val mse = 0.9404
Epoch 5: Train mse = 0.8418 | Val mse = 0.9080
Epoch 6: Train mse = 0.7873 | Val mse = 0.8209
Epoch 7: Train mse = 0.7269 | Val mse = 0.7880
Epoch 8: Train mse = 0.6898 | Val mse = 0.7523
Epoch 9: Train mse = 0.6205 | Val mse = 0.6541
Epoch 10: Train mse = 0.5238 | Val mse = 0.5822
Epoch 11: Train mse = 0.4446 | Val mse = 0.5165
Epoch 12: Train mse = 0.3724 | Val mse = 0.4482
Epoch 13: Train mse = 0.3318 | Val mse = 0.4155
Epoch 14: Train mse = 0.3064 | Val mse = 0.3948
Epoch 15: Train mse = 0.2932 | Val mse = 0.3729
Epoch 16: Train mse = 0.2834 | Val mse = 0.3691
Epoch 17: Train mse = 0.2752 | Val mse = 0.3540
Epoch 18: Train mse = 0.2662 | Val mse = 0.3702
Epoch 19: Train mse = 0.2612 | Val mse = 0.3529
Epoch 20: Train mse = 0.2544 | Val mse = 0.3466
Epoch 21: Train mse = 0.2487 | Val mse = 0.3411
Epoch 22: Train mse = 0.2453 | Val mse = 0.3469
Epoch 23: Train mse = 0.2419 | Val mse = 0.3421
Epoch 24: Train mse = 0.2374 | Val mse = 0.3493
Epoch 25: Train mse = 0.2336 | Val mse = 0.3434
Epoch 26: Train mse = 0.2301 | Val mse = 0.3470
Epoch 27: Train mse = 0.2271 | Val mse = 0.3516
Epoch 28: Train mse = 0.2250 | Val mse = 0.3597
Epoch 29: Train mse = 0.2214 | Val mse = 0.3586
Epoch 30: Train mse = 0.2197 | Val mse = 0.3602
Epoch 31: Train mse = 0.2163 | Val mse = 0.3541
Early stopping triggered at epoch 31.
Using device: mps
Epoch 1: Train mse = 0.9923 | Val mse = 1.1869
Epoch 2: Train mse = 0.9321 | Val mse = 0.9561
Epoch 3: Train mse = 0.8830 | Val mse = 0.9479
Epoch 4: Train mse = 0.8648 | Val mse = 0.9410
Epoch 5: Train mse = 0.8454 | Val mse = 0.9131
Epoch 6: Train mse = 0.7999 | Val mse = 0.8317
Epoch 7: Train mse = 0.7340 | Val mse = 0.7867
Epoch 8: Train mse = 0.6987 | Val mse = 0.7598
Epoch 9: Train mse = 0.6386 | Val mse = 0.6686
Epoch 10: Train mse = 0.5426 | Val mse = 0.5879
Epoch 11: Train mse = 0.4625 | Val mse = 0.5245
Epoch 12: Train mse = 0.3886 | Val mse = 0.4541
Epoch 13: Train mse = 0.3429 | Val mse = 0.4163
Epoch 14: Train mse = 0.3145 | Val mse = 0.3934
Epoch 15: Train mse = 0.2992 | Val mse = 0.3733
Epoch 16: Train mse = 0.2895 | Val mse = 0.3693
Epoch 17: Train mse = 0.2813 | Val mse = 0.3539
Epoch 18: Train mse = 0.2727 | Val mse = 0.3683
Epoch 19: Train mse = 0.2673 | Val mse = 0.3485
Epoch 20: Train mse = 0.2600 | Val mse = 0.3449
Epoch 21: Train mse = 0.2550 | Val mse = 0.3393
Epoch 22: Train mse = 0.2521 | Val mse = 0.3419
Epoch 23: Train mse = 0.2490 | Val mse = 0.3410
Epoch 24: Train mse = 0.2445 | Val mse = 0.3450
Epoch 25: Train mse = 0.2412 | Val mse = 0.3394
Epoch 26: Train mse = 0.2381 | Val mse = 0.3427
Epoch 27: Train mse = 0.2353 | Val mse = 0.3493
Epoch 28: Train mse = 0.2338 | Val mse = 0.3545
Epoch 29: Train mse = 0.2309 | Val mse = 0.3498
Epoch 30: Train mse = 0.2295 | Val mse = 0.3512
Epoch 31: Train mse = 0.2257 | Val mse = 0.3472
Early stopping triggered at epoch 31.
Using device: mps
Epoch 1: Train mse = 0.8886 | Val mse = 0.8520
Epoch 2: Train mse = 0.6514 | Val mse = 0.5752
Epoch 3: Train mse = 0.3994 | Val mse = 0.4508
Epoch 4: Train mse = 0.3325 | Val mse = 0.3879
Epoch 5: Train mse = 0.3103 | Val mse = 0.3575
Epoch 6: Train mse = 0.2915 | Val mse = 0.3387
Epoch 7: Train mse = 0.2794 | Val mse = 0.3250
Epoch 8: Train mse = 0.2641 | Val mse = 0.3459
Epoch 9: Train mse = 0.2510 | Val mse = 0.3343
Epoch 10: Train mse = 0.2443 | Val mse = 0.3427
Epoch 11: Train mse = 0.2412 | Val mse = 0.3499
Epoch 12: Train mse = 0.2335 | Val mse = 0.3432
Epoch 13: Train mse = 0.2285 | Val mse = 0.3433
Epoch 14: Train mse = 0.2250 | Val mse = 0.3431
Epoch 15: Train mse = 0.2208 | Val mse = 0.3460
Epoch 16: Train mse = 0.2146 | Val mse = 0.3454
Epoch 17: Train mse = 0.2070 | Val mse = 0.3439
Epoch 18: Train mse = 0.2023 | Val mse = 0.3484
Epoch 19: Train mse = 0.1955 | Val mse = 0.3502
Epoch 20: Train mse = 0.1942 | Val mse = 0.3488
Epoch 21: Train mse = 0.1895 | Val mse = 0.3386
Epoch 22: Train mse = 0.1834 | Val mse = 0.3445
Epoch 23: Train mse = 0.1786 | Val mse = 0.3472
Epoch 24: Train mse = 0.1750 | Val mse = 0.3632
Epoch 25: Train mse = 0.1725 | Val mse = 0.3575
Epoch 26: Train mse = 0.1691 | Val mse = 0.3677
Epoch 27: Train mse = 0.1650 | Val mse = 0.3626
Epoch 28: Train mse = 0.1627 | Val mse = 0.3614
Epoch 29: Train mse = 0.1605 | Val mse = 0.3847
Epoch 30: Train mse = 0.1598 | Val mse = 0.3776
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8919 | Val mse = 0.8754
Epoch 2: Train mse = 0.6615 | Val mse = 0.5760
Epoch 3: Train mse = 0.4062 | Val mse = 0.4394
Epoch 4: Train mse = 0.3335 | Val mse = 0.3664
Epoch 5: Train mse = 0.3110 | Val mse = 0.3669
Epoch 6: Train mse = 0.2864 | Val mse = 0.3261
Epoch 7: Train mse = 0.2806 | Val mse = 0.3184
Epoch 8: Train mse = 0.2672 | Val mse = 0.3448
Epoch 9: Train mse = 0.2533 | Val mse = 0.3241
Epoch 10: Train mse = 0.2428 | Val mse = 0.3329
Epoch 11: Train mse = 0.2385 | Val mse = 0.3340
Epoch 12: Train mse = 0.2280 | Val mse = 0.3362
Epoch 13: Train mse = 0.2230 | Val mse = 0.3406
Epoch 14: Train mse = 0.2171 | Val mse = 0.3374
Epoch 15: Train mse = 0.2120 | Val mse = 0.3476
Epoch 16: Train mse = 0.2085 | Val mse = 0.3471
Epoch 17: Train mse = 0.2062 | Val mse = 0.3484
Epoch 18: Train mse = 0.2035 | Val mse = 0.3516
Epoch 19: Train mse = 0.1990 | Val mse = 0.3531
Epoch 20: Train mse = 0.1946 | Val mse = 0.3513
Epoch 21: Train mse = 0.1911 | Val mse = 0.3535
Epoch 22: Train mse = 0.1858 | Val mse = 0.3569
Epoch 23: Train mse = 0.1864 | Val mse = 0.3667
Epoch 24: Train mse = 0.1818 | Val mse = 0.3607
Epoch 25: Train mse = 0.1775 | Val mse = 0.3510
Epoch 26: Train mse = 0.1723 | Val mse = 0.3620
Epoch 27: Train mse = 0.1703 | Val mse = 0.3744
Epoch 28: Train mse = 0.1761 | Val mse = 0.3753
Epoch 29: Train mse = 0.1740 | Val mse = 0.3551
Epoch 30: Train mse = 0.1726 | Val mse = 0.3705
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9988 | Val mse = 1.1751
Epoch 2: Train mse = 0.9280 | Val mse = 0.9471
Epoch 3: Train mse = 0.8810 | Val mse = 0.9378
Epoch 4: Train mse = 0.8550 | Val mse = 0.9119
Epoch 5: Train mse = 0.8345 | Val mse = 0.8945
Epoch 6: Train mse = 0.7742 | Val mse = 0.8082
Epoch 7: Train mse = 0.7215 | Val mse = 0.7927
Epoch 8: Train mse = 0.6819 | Val mse = 0.7664
Epoch 9: Train mse = 0.6341 | Val mse = 0.7204
Epoch 10: Train mse = 0.5571 | Val mse = 0.6648
Epoch 11: Train mse = 0.4751 | Val mse = 0.5926
Epoch 12: Train mse = 0.3998 | Val mse = 0.5485
Epoch 13: Train mse = 0.3382 | Val mse = 0.5109
Epoch 14: Train mse = 0.3027 | Val mse = 0.5084
Epoch 15: Train mse = 0.2828 | Val mse = 0.4817
Epoch 16: Train mse = 0.2656 | Val mse = 0.4528
Epoch 17: Train mse = 0.2547 | Val mse = 0.4682
Epoch 18: Train mse = 0.2454 | Val mse = 0.4539
Epoch 19: Train mse = 0.2347 | Val mse = 0.4548
Epoch 20: Train mse = 0.2250 | Val mse = 0.4490
Epoch 21: Train mse = 0.2189 | Val mse = 0.4601
Epoch 22: Train mse = 0.2114 | Val mse = 0.4677
Epoch 23: Train mse = 0.2072 | Val mse = 0.4722
Epoch 24: Train mse = 0.2030 | Val mse = 0.4683
Epoch 25: Train mse = 0.1983 | Val mse = 0.4620
Epoch 26: Train mse = 0.1947 | Val mse = 0.4800
Epoch 27: Train mse = 0.1911 | Val mse = 0.4577
Epoch 28: Train mse = 0.1884 | Val mse = 0.4758
Epoch 29: Train mse = 0.1877 | Val mse = 0.4661
Epoch 30: Train mse = 0.1832 | Val mse = 0.4848
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9992 | Val mse = 1.1787
Epoch 2: Train mse = 0.9287 | Val mse = 0.9470
Epoch 3: Train mse = 0.8813 | Val mse = 0.9392
Epoch 4: Train mse = 0.8562 | Val mse = 0.9119
Epoch 5: Train mse = 0.8387 | Val mse = 0.8986
Epoch 6: Train mse = 0.7818 | Val mse = 0.8097
Epoch 7: Train mse = 0.7254 | Val mse = 0.7911
Epoch 8: Train mse = 0.6867 | Val mse = 0.7659
Epoch 9: Train mse = 0.6406 | Val mse = 0.7197
Epoch 10: Train mse = 0.5664 | Val mse = 0.6621
Epoch 11: Train mse = 0.4877 | Val mse = 0.5861
Epoch 12: Train mse = 0.4117 | Val mse = 0.5321
Epoch 13: Train mse = 0.3471 | Val mse = 0.4927
Epoch 14: Train mse = 0.3082 | Val mse = 0.5053
Epoch 15: Train mse = 0.2889 | Val mse = 0.4721
Epoch 16: Train mse = 0.2713 | Val mse = 0.4498
Epoch 17: Train mse = 0.2603 | Val mse = 0.4476
Epoch 18: Train mse = 0.2528 | Val mse = 0.4408
Epoch 19: Train mse = 0.2436 | Val mse = 0.4272
Epoch 20: Train mse = 0.2337 | Val mse = 0.4355
Epoch 21: Train mse = 0.2264 | Val mse = 0.4367
Epoch 22: Train mse = 0.2193 | Val mse = 0.4564
Epoch 23: Train mse = 0.2146 | Val mse = 0.4592
Epoch 24: Train mse = 0.2096 | Val mse = 0.4445
Epoch 25: Train mse = 0.2034 | Val mse = 0.4596
Epoch 26: Train mse = 0.1994 | Val mse = 0.4618
Epoch 27: Train mse = 0.1965 | Val mse = 0.4433
Epoch 28: Train mse = 0.1945 | Val mse = 0.4529
Epoch 29: Train mse = 0.1932 | Val mse = 0.4524
Epoch 30: Train mse = 0.1890 | Val mse = 0.4613
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8878 | Val mse = 0.8138
Epoch 2: Train mse = 0.6905 | Val mse = 0.6508
Epoch 3: Train mse = 0.4295 | Val mse = 0.4367
Epoch 4: Train mse = 0.3451 | Val mse = 0.3707
Epoch 5: Train mse = 0.3028 | Val mse = 0.3690
Epoch 6: Train mse = 0.2800 | Val mse = 0.3378
Epoch 7: Train mse = 0.2650 | Val mse = 0.3452
Epoch 8: Train mse = 0.2506 | Val mse = 0.3379
Epoch 9: Train mse = 0.2460 | Val mse = 0.3521
Epoch 10: Train mse = 0.2394 | Val mse = 0.3462
Epoch 11: Train mse = 0.2339 | Val mse = 0.3489
Epoch 12: Train mse = 0.2280 | Val mse = 0.3334
Epoch 13: Train mse = 0.2248 | Val mse = 0.3383
Epoch 14: Train mse = 0.2240 | Val mse = 0.3471
Epoch 15: Train mse = 0.2191 | Val mse = 0.3510
Epoch 16: Train mse = 0.2130 | Val mse = 0.3411
Epoch 17: Train mse = 0.2091 | Val mse = 0.3493
Epoch 18: Train mse = 0.2029 | Val mse = 0.3450
Epoch 19: Train mse = 0.1986 | Val mse = 0.3413
Epoch 20: Train mse = 0.1954 | Val mse = 0.3473
Epoch 21: Train mse = 0.1909 | Val mse = 0.3534
Epoch 22: Train mse = 0.1877 | Val mse = 0.3440
Epoch 23: Train mse = 0.1867 | Val mse = 0.3562
Epoch 24: Train mse = 0.1874 | Val mse = 0.3670
Epoch 25: Train mse = 0.1945 | Val mse = 0.3704
Epoch 26: Train mse = 0.1907 | Val mse = 0.3692
Epoch 27: Train mse = 0.1879 | Val mse = 0.3554
Epoch 28: Train mse = 0.1876 | Val mse = 0.3595
Epoch 29: Train mse = 0.1877 | Val mse = 0.3609
Epoch 30: Train mse = 0.1882 | Val mse = 0.3643
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.8900 | Val mse = 0.8101
Epoch 2: Train mse = 0.6919 | Val mse = 0.6540
Epoch 3: Train mse = 0.4364 | Val mse = 0.4463
Epoch 4: Train mse = 0.3461 | Val mse = 0.3738
Epoch 5: Train mse = 0.3094 | Val mse = 0.3558
Epoch 6: Train mse = 0.2847 | Val mse = 0.3552
Epoch 7: Train mse = 0.2695 | Val mse = 0.3399
Epoch 8: Train mse = 0.2680 | Val mse = 0.3689
Epoch 9: Train mse = 0.2580 | Val mse = 0.3465
Epoch 10: Train mse = 0.2422 | Val mse = 0.3390
Epoch 11: Train mse = 0.2320 | Val mse = 0.3189
Epoch 12: Train mse = 0.2166 | Val mse = 0.3341
Epoch 13: Train mse = 0.2098 | Val mse = 0.3279
Epoch 14: Train mse = 0.2035 | Val mse = 0.3244
Epoch 15: Train mse = 0.1961 | Val mse = 0.3385
Epoch 16: Train mse = 0.1923 | Val mse = 0.3361
Epoch 17: Train mse = 0.1891 | Val mse = 0.3299
Epoch 18: Train mse = 0.1844 | Val mse = 0.3393
Epoch 19: Train mse = 0.1823 | Val mse = 0.3531
Epoch 20: Train mse = 0.1819 | Val mse = 0.3418
Epoch 21: Train mse = 0.1810 | Val mse = 0.3528
Epoch 22: Train mse = 0.1852 | Val mse = 0.3572
Epoch 23: Train mse = 0.1925 | Val mse = 0.3621
Epoch 24: Train mse = 0.2041 | Val mse = 0.3551
Epoch 25: Train mse = 0.2138 | Val mse = 0.3832
Epoch 26: Train mse = 0.2124 | Val mse = 0.3488
Epoch 27: Train mse = 0.2108 | Val mse = 0.3638
Epoch 28: Train mse = 0.2045 | Val mse = 0.3574
Epoch 29: Train mse = 0.1985 | Val mse = 0.3611
Epoch 30: Train mse = 0.1932 | Val mse = 0.3468
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9971 | Val mse = 1.2215
Epoch 2: Train mse = 0.9967 | Val mse = 1.2121
Epoch 3: Train mse = 0.9869 | Val mse = 1.1968
Epoch 4: Train mse = 0.9766 | Val mse = 1.1718
Epoch 5: Train mse = 0.9652 | Val mse = 1.1337
Epoch 6: Train mse = 0.9452 | Val mse = 1.0829
Epoch 7: Train mse = 0.9337 | Val mse = 1.0290
Epoch 8: Train mse = 0.9181 | Val mse = 0.9867
Epoch 9: Train mse = 0.9064 | Val mse = 0.9627
Epoch 10: Train mse = 0.8912 | Val mse = 0.9504
Epoch 11: Train mse = 0.8809 | Val mse = 0.9436
Epoch 12: Train mse = 0.8807 | Val mse = 0.9316
Epoch 13: Train mse = 0.8712 | Val mse = 0.9212
Epoch 14: Train mse = 0.8571 | Val mse = 0.9107
Epoch 15: Train mse = 0.8510 | Val mse = 0.9003
Epoch 16: Train mse = 0.8380 | Val mse = 0.8892
Epoch 17: Train mse = 0.8251 | Val mse = 0.8737
Epoch 18: Train mse = 0.8075 | Val mse = 0.8534
Epoch 19: Train mse = 0.7861 | Val mse = 0.8309
Epoch 20: Train mse = 0.7684 | Val mse = 0.8063
Epoch 21: Train mse = 0.7415 | Val mse = 0.7827
Epoch 22: Train mse = 0.7218 | Val mse = 0.7591
Epoch 23: Train mse = 0.6983 | Val mse = 0.7368
Epoch 24: Train mse = 0.6705 | Val mse = 0.7149
Epoch 25: Train mse = 0.6453 | Val mse = 0.6948
Epoch 26: Train mse = 0.6253 | Val mse = 0.6770
Epoch 27: Train mse = 0.6027 | Val mse = 0.6631
Epoch 28: Train mse = 0.5836 | Val mse = 0.6460
Epoch 29: Train mse = 0.5623 | Val mse = 0.6307
Epoch 30: Train mse = 0.5388 | Val mse = 0.6171
Epoch 31: Train mse = 0.5204 | Val mse = 0.6008
Epoch 32: Train mse = 0.5050 | Val mse = 0.5843
Epoch 33: Train mse = 0.4853 | Val mse = 0.5652
Epoch 34: Train mse = 0.4643 | Val mse = 0.5476
Epoch 35: Train mse = 0.4491 | Val mse = 0.5313
Epoch 36: Train mse = 0.4276 | Val mse = 0.5157
Epoch 37: Train mse = 0.4134 | Val mse = 0.5000
Epoch 38: Train mse = 0.3968 | Val mse = 0.4812
Epoch 39: Train mse = 0.3819 | Val mse = 0.4667
Epoch 40: Train mse = 0.3678 | Val mse = 0.4525
Epoch 41: Train mse = 0.3501 | Val mse = 0.4417
Epoch 42: Train mse = 0.3416 | Val mse = 0.4266
Epoch 43: Train mse = 0.3321 | Val mse = 0.4179
Epoch 44: Train mse = 0.3202 | Val mse = 0.4132
Epoch 45: Train mse = 0.3146 | Val mse = 0.4019
Epoch 46: Train mse = 0.3098 | Val mse = 0.3958
Epoch 47: Train mse = 0.2984 | Val mse = 0.3911
Epoch 48: Train mse = 0.3008 | Val mse = 0.3845
Epoch 49: Train mse = 0.2917 | Val mse = 0.3816
Epoch 50: Train mse = 0.2900 | Val mse = 0.3825
Using device: mps
Epoch 1: Train mse = 0.9972 | Val mse = 1.2215
Epoch 2: Train mse = 0.9970 | Val mse = 1.2122
Epoch 3: Train mse = 0.9874 | Val mse = 1.1973
Epoch 4: Train mse = 0.9773 | Val mse = 1.1731
Epoch 5: Train mse = 0.9661 | Val mse = 1.1356
Epoch 6: Train mse = 0.9462 | Val mse = 1.0851
Epoch 7: Train mse = 0.9346 | Val mse = 1.0309
Epoch 8: Train mse = 0.9189 | Val mse = 0.9882
Epoch 9: Train mse = 0.9073 | Val mse = 0.9643
Epoch 10: Train mse = 0.8922 | Val mse = 0.9520
Epoch 11: Train mse = 0.8820 | Val mse = 0.9450
Epoch 12: Train mse = 0.8820 | Val mse = 0.9326
Epoch 13: Train mse = 0.8726 | Val mse = 0.9217
Epoch 14: Train mse = 0.8590 | Val mse = 0.9110
Epoch 15: Train mse = 0.8535 | Val mse = 0.9010
Epoch 16: Train mse = 0.8415 | Val mse = 0.8908
Epoch 17: Train mse = 0.8300 | Val mse = 0.8764
Epoch 18: Train mse = 0.8138 | Val mse = 0.8565
Epoch 19: Train mse = 0.7937 | Val mse = 0.8340
Epoch 20: Train mse = 0.7770 | Val mse = 0.8093
Epoch 21: Train mse = 0.7508 | Val mse = 0.7854
Epoch 22: Train mse = 0.7311 | Val mse = 0.7615
Epoch 23: Train mse = 0.7075 | Val mse = 0.7385
Epoch 24: Train mse = 0.6795 | Val mse = 0.7161
Epoch 25: Train mse = 0.6540 | Val mse = 0.6958
Epoch 26: Train mse = 0.6337 | Val mse = 0.6782
Epoch 27: Train mse = 0.6111 | Val mse = 0.6647
Epoch 28: Train mse = 0.5919 | Val mse = 0.6476
Epoch 29: Train mse = 0.5709 | Val mse = 0.6325
Epoch 30: Train mse = 0.5474 | Val mse = 0.6192
Epoch 31: Train mse = 0.5296 | Val mse = 0.6033
Epoch 32: Train mse = 0.5146 | Val mse = 0.5868
Epoch 33: Train mse = 0.4951 | Val mse = 0.5672
Epoch 34: Train mse = 0.4743 | Val mse = 0.5492
Epoch 35: Train mse = 0.4594 | Val mse = 0.5323
Epoch 36: Train mse = 0.4376 | Val mse = 0.5157
Epoch 37: Train mse = 0.4234 | Val mse = 0.4993
Epoch 38: Train mse = 0.4064 | Val mse = 0.4799
Epoch 39: Train mse = 0.3910 | Val mse = 0.4643
Epoch 40: Train mse = 0.3762 | Val mse = 0.4496
Epoch 41: Train mse = 0.3580 | Val mse = 0.4378
Epoch 42: Train mse = 0.3489 | Val mse = 0.4219
Epoch 43: Train mse = 0.3390 | Val mse = 0.4126
Epoch 44: Train mse = 0.3265 | Val mse = 0.4072
Epoch 45: Train mse = 0.3205 | Val mse = 0.3955
Epoch 46: Train mse = 0.3156 | Val mse = 0.3889
Epoch 47: Train mse = 0.3037 | Val mse = 0.3835
Epoch 48: Train mse = 0.3062 | Val mse = 0.3768
Epoch 49: Train mse = 0.2971 | Val mse = 0.3735
Epoch 50: Train mse = 0.2952 | Val mse = 0.3739
Using device: mps
Epoch 1: Train mse = 0.9685 | Val mse = 0.9857
Epoch 2: Train mse = 0.8959 | Val mse = 0.9374
Epoch 3: Train mse = 0.8397 | Val mse = 0.8505
Epoch 4: Train mse = 0.7428 | Val mse = 0.7307
Epoch 5: Train mse = 0.6379 | Val mse = 0.6530
Epoch 6: Train mse = 0.5467 | Val mse = 0.5881
Epoch 7: Train mse = 0.4532 | Val mse = 0.4906
Epoch 8: Train mse = 0.3698 | Val mse = 0.4048
Epoch 9: Train mse = 0.3205 | Val mse = 0.3730
Epoch 10: Train mse = 0.2980 | Val mse = 0.3779
Epoch 11: Train mse = 0.2837 | Val mse = 0.3471
Epoch 12: Train mse = 0.2719 | Val mse = 0.3548
Epoch 13: Train mse = 0.2591 | Val mse = 0.3498
Epoch 14: Train mse = 0.2527 | Val mse = 0.3551
Epoch 15: Train mse = 0.2438 | Val mse = 0.3635
Epoch 16: Train mse = 0.2394 | Val mse = 0.3649
Epoch 17: Train mse = 0.2278 | Val mse = 0.3666
Epoch 18: Train mse = 0.2201 | Val mse = 0.3757
Epoch 19: Train mse = 0.2131 | Val mse = 0.3766
Epoch 20: Train mse = 0.2091 | Val mse = 0.3754
Epoch 21: Train mse = 0.2044 | Val mse = 0.3770
Epoch 22: Train mse = 0.2012 | Val mse = 0.3778
Epoch 23: Train mse = 0.1955 | Val mse = 0.3878
Epoch 24: Train mse = 0.1897 | Val mse = 0.3858
Epoch 25: Train mse = 0.1835 | Val mse = 0.3926
Epoch 26: Train mse = 0.1804 | Val mse = 0.3928
Epoch 27: Train mse = 0.1795 | Val mse = 0.3954
Epoch 28: Train mse = 0.1759 | Val mse = 0.3904
Epoch 29: Train mse = 0.1723 | Val mse = 0.3996
Epoch 30: Train mse = 0.1684 | Val mse = 0.4006
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9691 | Val mse = 0.9872
Epoch 2: Train mse = 0.8967 | Val mse = 0.9410
Epoch 3: Train mse = 0.8445 | Val mse = 0.8512
Epoch 4: Train mse = 0.7522 | Val mse = 0.7375
Epoch 5: Train mse = 0.6476 | Val mse = 0.6567
Epoch 6: Train mse = 0.5574 | Val mse = 0.5951
Epoch 7: Train mse = 0.4656 | Val mse = 0.4990
Epoch 8: Train mse = 0.3798 | Val mse = 0.4065
Epoch 9: Train mse = 0.3258 | Val mse = 0.3683
Epoch 10: Train mse = 0.3023 | Val mse = 0.3696
Epoch 11: Train mse = 0.2872 | Val mse = 0.3416
Epoch 12: Train mse = 0.2771 | Val mse = 0.3469
Epoch 13: Train mse = 0.2643 | Val mse = 0.3434
Epoch 14: Train mse = 0.2586 | Val mse = 0.3487
Epoch 15: Train mse = 0.2494 | Val mse = 0.3661
Epoch 16: Train mse = 0.2443 | Val mse = 0.3610
Epoch 17: Train mse = 0.2340 | Val mse = 0.3653
Epoch 18: Train mse = 0.2265 | Val mse = 0.3780
Epoch 19: Train mse = 0.2203 | Val mse = 0.3750
Epoch 20: Train mse = 0.2172 | Val mse = 0.3766
Epoch 21: Train mse = 0.2124 | Val mse = 0.3728
Epoch 22: Train mse = 0.2101 | Val mse = 0.3729
Epoch 23: Train mse = 0.2046 | Val mse = 0.3809
Epoch 24: Train mse = 0.1973 | Val mse = 0.3844
Epoch 25: Train mse = 0.1903 | Val mse = 0.3899
Epoch 26: Train mse = 0.1865 | Val mse = 0.3909
Epoch 27: Train mse = 0.1848 | Val mse = 0.3929
Epoch 28: Train mse = 0.1815 | Val mse = 0.3957
Epoch 29: Train mse = 0.1761 | Val mse = 0.4010
Epoch 30: Train mse = 0.1725 | Val mse = 0.3972
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 1.0003 | Val mse = 1.2205
Epoch 2: Train mse = 0.9890 | Val mse = 1.2104
Epoch 3: Train mse = 0.9823 | Val mse = 1.1921
Epoch 4: Train mse = 0.9777 | Val mse = 1.1622
Epoch 5: Train mse = 0.9638 | Val mse = 1.1203
Epoch 6: Train mse = 0.9450 | Val mse = 1.0697
Epoch 7: Train mse = 0.9274 | Val mse = 1.0211
Epoch 8: Train mse = 0.9158 | Val mse = 0.9860
Epoch 9: Train mse = 0.9024 | Val mse = 0.9656
Epoch 10: Train mse = 0.8959 | Val mse = 0.9560
Epoch 11: Train mse = 0.8769 | Val mse = 0.9443
Epoch 12: Train mse = 0.8711 | Val mse = 0.9284
Epoch 13: Train mse = 0.8590 | Val mse = 0.9121
Epoch 14: Train mse = 0.8429 | Val mse = 0.8967
Epoch 15: Train mse = 0.8291 | Val mse = 0.8825
Epoch 16: Train mse = 0.8137 | Val mse = 0.8664
Epoch 17: Train mse = 0.7918 | Val mse = 0.8488
Epoch 18: Train mse = 0.7765 | Val mse = 0.8346
Epoch 19: Train mse = 0.7573 | Val mse = 0.8192
Epoch 20: Train mse = 0.7417 | Val mse = 0.8031
Epoch 21: Train mse = 0.7213 | Val mse = 0.7870
Epoch 22: Train mse = 0.7006 | Val mse = 0.7703
Epoch 23: Train mse = 0.6852 | Val mse = 0.7534
Epoch 24: Train mse = 0.6648 | Val mse = 0.7355
Epoch 25: Train mse = 0.6419 | Val mse = 0.7169
Epoch 26: Train mse = 0.6209 | Val mse = 0.6989
Epoch 27: Train mse = 0.6000 | Val mse = 0.6790
Epoch 28: Train mse = 0.5737 | Val mse = 0.6633
Epoch 29: Train mse = 0.5543 | Val mse = 0.6472
Epoch 30: Train mse = 0.5344 | Val mse = 0.6296
Epoch 31: Train mse = 0.5092 | Val mse = 0.6117
Epoch 32: Train mse = 0.4918 | Val mse = 0.5952
Epoch 33: Train mse = 0.4680 | Val mse = 0.5791
Epoch 34: Train mse = 0.4502 | Val mse = 0.5638
Epoch 35: Train mse = 0.4286 | Val mse = 0.5497
Epoch 36: Train mse = 0.4092 | Val mse = 0.5340
Epoch 37: Train mse = 0.3948 | Val mse = 0.5201
Epoch 38: Train mse = 0.3773 | Val mse = 0.5053
Epoch 39: Train mse = 0.3664 | Val mse = 0.4928
Epoch 40: Train mse = 0.3513 | Val mse = 0.4816
Epoch 41: Train mse = 0.3358 | Val mse = 0.4703
Epoch 42: Train mse = 0.3289 | Val mse = 0.4594
Epoch 43: Train mse = 0.3171 | Val mse = 0.4533
Epoch 44: Train mse = 0.3025 | Val mse = 0.4437
Epoch 45: Train mse = 0.2993 | Val mse = 0.4380
Epoch 46: Train mse = 0.2910 | Val mse = 0.4340
Epoch 47: Train mse = 0.2840 | Val mse = 0.4289
Epoch 48: Train mse = 0.2770 | Val mse = 0.4241
Epoch 49: Train mse = 0.2721 | Val mse = 0.4217
Epoch 50: Train mse = 0.2631 | Val mse = 0.4179
Using device: mps
Epoch 1: Train mse = 1.0004 | Val mse = 1.2205
Epoch 2: Train mse = 0.9893 | Val mse = 1.2109
Epoch 3: Train mse = 0.9828 | Val mse = 1.1932
Epoch 4: Train mse = 0.9783 | Val mse = 1.1638
Epoch 5: Train mse = 0.9644 | Val mse = 1.1220
Epoch 6: Train mse = 0.9455 | Val mse = 1.0709
Epoch 7: Train mse = 0.9276 | Val mse = 1.0217
Epoch 8: Train mse = 0.9159 | Val mse = 0.9863
Epoch 9: Train mse = 0.9024 | Val mse = 0.9659
Epoch 10: Train mse = 0.8959 | Val mse = 0.9563
Epoch 11: Train mse = 0.8769 | Val mse = 0.9443
Epoch 12: Train mse = 0.8712 | Val mse = 0.9281
Epoch 13: Train mse = 0.8594 | Val mse = 0.9117
Epoch 14: Train mse = 0.8432 | Val mse = 0.8964
Epoch 15: Train mse = 0.8292 | Val mse = 0.8821
Epoch 16: Train mse = 0.8135 | Val mse = 0.8652
Epoch 17: Train mse = 0.7913 | Val mse = 0.8469
Epoch 18: Train mse = 0.7762 | Val mse = 0.8324
Epoch 19: Train mse = 0.7570 | Val mse = 0.8169
Epoch 20: Train mse = 0.7420 | Val mse = 0.8008
Epoch 21: Train mse = 0.7221 | Val mse = 0.7852
Epoch 22: Train mse = 0.7022 | Val mse = 0.7690
Epoch 23: Train mse = 0.6876 | Val mse = 0.7528
Epoch 24: Train mse = 0.6678 | Val mse = 0.7353
Epoch 25: Train mse = 0.6450 | Val mse = 0.7160
Epoch 26: Train mse = 0.6243 | Val mse = 0.6973
Epoch 27: Train mse = 0.6036 | Val mse = 0.6767
Epoch 28: Train mse = 0.5775 | Val mse = 0.6605
Epoch 29: Train mse = 0.5584 | Val mse = 0.6443
Epoch 30: Train mse = 0.5386 | Val mse = 0.6269
Epoch 31: Train mse = 0.5135 | Val mse = 0.6090
Epoch 32: Train mse = 0.4960 | Val mse = 0.5923
Epoch 33: Train mse = 0.4723 | Val mse = 0.5754
Epoch 34: Train mse = 0.4543 | Val mse = 0.5594
Epoch 35: Train mse = 0.4320 | Val mse = 0.5450
Epoch 36: Train mse = 0.4122 | Val mse = 0.5282
Epoch 37: Train mse = 0.3972 | Val mse = 0.5132
Epoch 38: Train mse = 0.3793 | Val mse = 0.4972
Epoch 39: Train mse = 0.3685 | Val mse = 0.4836
Epoch 40: Train mse = 0.3533 | Val mse = 0.4715
Epoch 41: Train mse = 0.3380 | Val mse = 0.4603
Epoch 42: Train mse = 0.3314 | Val mse = 0.4475
Epoch 43: Train mse = 0.3196 | Val mse = 0.4404
Epoch 44: Train mse = 0.3050 | Val mse = 0.4300
Epoch 45: Train mse = 0.3024 | Val mse = 0.4233
Epoch 46: Train mse = 0.2942 | Val mse = 0.4186
Epoch 47: Train mse = 0.2873 | Val mse = 0.4133
Epoch 48: Train mse = 0.2807 | Val mse = 0.4082
Epoch 49: Train mse = 0.2765 | Val mse = 0.4047
Epoch 50: Train mse = 0.2674 | Val mse = 0.4018
Using device: mps
Epoch 1: Train mse = 0.9725 | Val mse = 0.9854
Epoch 2: Train mse = 0.8858 | Val mse = 0.9098
Epoch 3: Train mse = 0.8097 | Val mse = 0.8100
Epoch 4: Train mse = 0.7196 | Val mse = 0.7290
Epoch 5: Train mse = 0.6143 | Val mse = 0.6412
Epoch 6: Train mse = 0.5035 | Val mse = 0.5620
Epoch 7: Train mse = 0.4042 | Val mse = 0.4604
Epoch 8: Train mse = 0.3351 | Val mse = 0.4207
Epoch 9: Train mse = 0.2987 | Val mse = 0.3953
Epoch 10: Train mse = 0.2837 | Val mse = 0.3808
Epoch 11: Train mse = 0.2623 | Val mse = 0.3761
Epoch 12: Train mse = 0.2461 | Val mse = 0.3612
Epoch 13: Train mse = 0.2336 | Val mse = 0.3740
Epoch 14: Train mse = 0.2208 | Val mse = 0.3623
Epoch 15: Train mse = 0.2107 | Val mse = 0.3624
Epoch 16: Train mse = 0.2036 | Val mse = 0.3724
Epoch 17: Train mse = 0.1975 | Val mse = 0.3778
Epoch 18: Train mse = 0.1885 | Val mse = 0.3697
Epoch 19: Train mse = 0.1838 | Val mse = 0.3862
Epoch 20: Train mse = 0.1801 | Val mse = 0.3824
Epoch 21: Train mse = 0.1765 | Val mse = 0.3806
Epoch 22: Train mse = 0.1693 | Val mse = 0.3813
Epoch 23: Train mse = 0.1656 | Val mse = 0.3917
Epoch 24: Train mse = 0.1624 | Val mse = 0.3988
Epoch 25: Train mse = 0.1588 | Val mse = 0.3978
Epoch 26: Train mse = 0.1554 | Val mse = 0.3988
Epoch 27: Train mse = 0.1505 | Val mse = 0.4061
Epoch 28: Train mse = 0.1486 | Val mse = 0.4034
Epoch 29: Train mse = 0.1454 | Val mse = 0.4072
Epoch 30: Train mse = 0.1440 | Val mse = 0.4168
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9730 | Val mse = 0.9869
Epoch 2: Train mse = 0.8856 | Val mse = 0.9103
Epoch 3: Train mse = 0.8099 | Val mse = 0.8111
Epoch 4: Train mse = 0.7220 | Val mse = 0.7307
Epoch 5: Train mse = 0.6197 | Val mse = 0.6445
Epoch 6: Train mse = 0.5105 | Val mse = 0.5608
Epoch 7: Train mse = 0.4113 | Val mse = 0.4553
Epoch 8: Train mse = 0.3402 | Val mse = 0.4135
Epoch 9: Train mse = 0.3013 | Val mse = 0.3983
Epoch 10: Train mse = 0.2856 | Val mse = 0.3728
Epoch 11: Train mse = 0.2661 | Val mse = 0.3780
Epoch 12: Train mse = 0.2521 | Val mse = 0.3584
Epoch 13: Train mse = 0.2381 | Val mse = 0.3517
Epoch 14: Train mse = 0.2250 | Val mse = 0.3613
Epoch 15: Train mse = 0.2170 | Val mse = 0.3554
Epoch 16: Train mse = 0.2089 | Val mse = 0.3588
Epoch 17: Train mse = 0.2013 | Val mse = 0.3610
Epoch 18: Train mse = 0.1925 | Val mse = 0.3558
Epoch 19: Train mse = 0.1883 | Val mse = 0.3613
Epoch 20: Train mse = 0.1844 | Val mse = 0.3647
Epoch 21: Train mse = 0.1802 | Val mse = 0.3620
Epoch 22: Train mse = 0.1734 | Val mse = 0.3704
Epoch 23: Train mse = 0.1695 | Val mse = 0.3689
Epoch 24: Train mse = 0.1663 | Val mse = 0.3801
Epoch 25: Train mse = 0.1624 | Val mse = 0.3773
Epoch 26: Train mse = 0.1581 | Val mse = 0.3725
Epoch 27: Train mse = 0.1538 | Val mse = 0.3744
Epoch 28: Train mse = 0.1516 | Val mse = 0.3809
Epoch 29: Train mse = 0.1476 | Val mse = 0.3962
Epoch 30: Train mse = 0.1447 | Val mse = 0.3978
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9959 | Val mse = 1.2294
Epoch 2: Train mse = 1.0000 | Val mse = 1.2266
Epoch 3: Train mse = 0.9927 | Val mse = 1.2216
Epoch 4: Train mse = 0.9919 | Val mse = 1.2132
Epoch 5: Train mse = 0.9918 | Val mse = 1.1940
Epoch 6: Train mse = 0.9723 | Val mse = 1.1559
Epoch 7: Train mse = 0.9559 | Val mse = 1.0955
Epoch 8: Train mse = 0.9308 | Val mse = 1.0264
Epoch 9: Train mse = 0.9158 | Val mse = 0.9763
Epoch 10: Train mse = 0.8956 | Val mse = 0.9580
Epoch 11: Train mse = 0.8934 | Val mse = 0.9535
Epoch 12: Train mse = 0.8865 | Val mse = 0.9500
Epoch 13: Train mse = 0.8778 | Val mse = 0.9471
Epoch 14: Train mse = 0.8692 | Val mse = 0.9399
Epoch 15: Train mse = 0.8697 | Val mse = 0.9301
Epoch 16: Train mse = 0.8576 | Val mse = 0.9263
Epoch 17: Train mse = 0.8611 | Val mse = 0.9244
Epoch 18: Train mse = 0.8570 | Val mse = 0.9215
Epoch 19: Train mse = 0.8527 | Val mse = 0.9176
Epoch 20: Train mse = 0.8502 | Val mse = 0.9128
Epoch 21: Train mse = 0.8487 | Val mse = 0.9102
Epoch 22: Train mse = 0.8437 | Val mse = 0.9072
Epoch 23: Train mse = 0.8362 | Val mse = 0.9037
Epoch 24: Train mse = 0.8389 | Val mse = 0.8995
Epoch 25: Train mse = 0.8288 | Val mse = 0.8954
Epoch 26: Train mse = 0.8232 | Val mse = 0.8855
Epoch 27: Train mse = 0.8132 | Val mse = 0.8733
Epoch 28: Train mse = 0.7970 | Val mse = 0.8591
Epoch 29: Train mse = 0.7784 | Val mse = 0.8358
Epoch 30: Train mse = 0.7625 | Val mse = 0.8188
Epoch 31: Train mse = 0.7369 | Val mse = 0.8008
Epoch 32: Train mse = 0.7172 | Val mse = 0.7896
Epoch 33: Train mse = 0.7083 | Val mse = 0.7892
Epoch 34: Train mse = 0.6941 | Val mse = 0.7871
Epoch 35: Train mse = 0.6749 | Val mse = 0.7872
Epoch 36: Train mse = 0.6637 | Val mse = 0.7874
Epoch 37: Train mse = 0.6472 | Val mse = 0.7830
Epoch 38: Train mse = 0.6307 | Val mse = 0.7790
Epoch 39: Train mse = 0.6154 | Val mse = 0.7707
Epoch 40: Train mse = 0.6066 | Val mse = 0.7593
Epoch 41: Train mse = 0.5862 | Val mse = 0.7457
Epoch 42: Train mse = 0.5705 | Val mse = 0.7339
Epoch 43: Train mse = 0.5487 | Val mse = 0.7166
Epoch 44: Train mse = 0.5328 | Val mse = 0.7000
Epoch 45: Train mse = 0.5126 | Val mse = 0.6875
Epoch 46: Train mse = 0.4952 | Val mse = 0.6736
Epoch 47: Train mse = 0.4788 | Val mse = 0.6600
Epoch 48: Train mse = 0.4669 | Val mse = 0.6488
Epoch 49: Train mse = 0.4476 | Val mse = 0.6359
Epoch 50: Train mse = 0.4311 | Val mse = 0.6287
Using device: mps
Epoch 1: Train mse = 0.9959 | Val mse = 1.2294
Epoch 2: Train mse = 1.0000 | Val mse = 1.2266
Epoch 3: Train mse = 0.9929 | Val mse = 1.2218
Epoch 4: Train mse = 0.9923 | Val mse = 1.2142
Epoch 5: Train mse = 0.9928 | Val mse = 1.1966
Epoch 6: Train mse = 0.9740 | Val mse = 1.1608
Epoch 7: Train mse = 0.9581 | Val mse = 1.1005
Epoch 8: Train mse = 0.9325 | Val mse = 1.0272
Epoch 9: Train mse = 0.9167 | Val mse = 0.9742
Epoch 10: Train mse = 0.8960 | Val mse = 0.9565
Epoch 11: Train mse = 0.8934 | Val mse = 0.9528
Epoch 12: Train mse = 0.8868 | Val mse = 0.9502
Epoch 13: Train mse = 0.8786 | Val mse = 0.9482
Epoch 14: Train mse = 0.8704 | Val mse = 0.9422
Epoch 15: Train mse = 0.8710 | Val mse = 0.9329
Epoch 16: Train mse = 0.8590 | Val mse = 0.9282
Epoch 17: Train mse = 0.8625 | Val mse = 0.9254
Epoch 18: Train mse = 0.8584 | Val mse = 0.9232
Epoch 19: Train mse = 0.8543 | Val mse = 0.9200
Epoch 20: Train mse = 0.8519 | Val mse = 0.9151
Epoch 21: Train mse = 0.8508 | Val mse = 0.9116
Epoch 22: Train mse = 0.8462 | Val mse = 0.9087
Epoch 23: Train mse = 0.8395 | Val mse = 0.9059
Epoch 24: Train mse = 0.8434 | Val mse = 0.9020
Epoch 25: Train mse = 0.8346 | Val mse = 0.8985
Epoch 26: Train mse = 0.8316 | Val mse = 0.8913
Epoch 27: Train mse = 0.8242 | Val mse = 0.8815
Epoch 28: Train mse = 0.8117 | Val mse = 0.8712
Epoch 29: Train mse = 0.7969 | Val mse = 0.8505
Epoch 30: Train mse = 0.7829 | Val mse = 0.8322
Epoch 31: Train mse = 0.7564 | Val mse = 0.8109
Epoch 32: Train mse = 0.7335 | Val mse = 0.7938
Epoch 33: Train mse = 0.7239 | Val mse = 0.7896
Epoch 34: Train mse = 0.7100 | Val mse = 0.7837
Epoch 35: Train mse = 0.6906 | Val mse = 0.7815
Epoch 36: Train mse = 0.6803 | Val mse = 0.7826
Epoch 37: Train mse = 0.6637 | Val mse = 0.7811
Epoch 38: Train mse = 0.6466 | Val mse = 0.7768
Epoch 39: Train mse = 0.6308 | Val mse = 0.7698
Epoch 40: Train mse = 0.6217 | Val mse = 0.7599
Epoch 41: Train mse = 0.6011 | Val mse = 0.7475
Epoch 42: Train mse = 0.5852 | Val mse = 0.7349
Epoch 43: Train mse = 0.5640 | Val mse = 0.7161
Epoch 44: Train mse = 0.5471 | Val mse = 0.6951
Epoch 45: Train mse = 0.5263 | Val mse = 0.6784
Epoch 46: Train mse = 0.5080 | Val mse = 0.6599
Epoch 47: Train mse = 0.4903 | Val mse = 0.6433
Epoch 48: Train mse = 0.4790 | Val mse = 0.6308
Epoch 49: Train mse = 0.4579 | Val mse = 0.6161
Epoch 50: Train mse = 0.4397 | Val mse = 0.6083
Using device: mps
Epoch 1: Train mse = 0.9877 | Val mse = 1.1144
Epoch 2: Train mse = 0.9130 | Val mse = 0.9581
Epoch 3: Train mse = 0.8794 | Val mse = 0.9841
Epoch 4: Train mse = 0.8658 | Val mse = 0.9116
Epoch 5: Train mse = 0.8495 | Val mse = 0.9059
Epoch 6: Train mse = 0.8013 | Val mse = 0.8323
Epoch 7: Train mse = 0.7451 | Val mse = 0.7851
Epoch 8: Train mse = 0.7050 | Val mse = 0.7991
Epoch 9: Train mse = 0.6524 | Val mse = 0.6986
Epoch 10: Train mse = 0.5454 | Val mse = 0.5976
Epoch 11: Train mse = 0.4423 | Val mse = 0.5205
Epoch 12: Train mse = 0.3598 | Val mse = 0.4112
Epoch 13: Train mse = 0.3123 | Val mse = 0.3804
Epoch 14: Train mse = 0.3024 | Val mse = 0.3625
Epoch 15: Train mse = 0.2879 | Val mse = 0.3482
Epoch 16: Train mse = 0.2720 | Val mse = 0.3673
Epoch 17: Train mse = 0.2646 | Val mse = 0.3371
Epoch 18: Train mse = 0.2560 | Val mse = 0.3430
Epoch 19: Train mse = 0.2485 | Val mse = 0.3613
Epoch 20: Train mse = 0.2421 | Val mse = 0.3318
Epoch 21: Train mse = 0.2389 | Val mse = 0.3369
Epoch 22: Train mse = 0.2324 | Val mse = 0.3529
Epoch 23: Train mse = 0.2313 | Val mse = 0.3347
Epoch 24: Train mse = 0.2232 | Val mse = 0.3586
Epoch 25: Train mse = 0.2160 | Val mse = 0.3612
Epoch 26: Train mse = 0.2124 | Val mse = 0.3576
Epoch 27: Train mse = 0.2066 | Val mse = 0.3592
Epoch 28: Train mse = 0.2052 | Val mse = 0.3601
Epoch 29: Train mse = 0.2024 | Val mse = 0.3549
Epoch 30: Train mse = 0.1997 | Val mse = 0.3692
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9883 | Val mse = 1.1191
Epoch 2: Train mse = 0.9136 | Val mse = 0.9599
Epoch 3: Train mse = 0.8804 | Val mse = 0.9836
Epoch 4: Train mse = 0.8675 | Val mse = 0.9163
Epoch 5: Train mse = 0.8538 | Val mse = 0.9114
Epoch 6: Train mse = 0.8175 | Val mse = 0.8481
Epoch 7: Train mse = 0.7555 | Val mse = 0.7845
Epoch 8: Train mse = 0.7156 | Val mse = 0.8005
Epoch 9: Train mse = 0.6692 | Val mse = 0.7174
Epoch 10: Train mse = 0.5679 | Val mse = 0.6145
Epoch 11: Train mse = 0.4609 | Val mse = 0.5413
Epoch 12: Train mse = 0.3703 | Val mse = 0.4254
Epoch 13: Train mse = 0.3195 | Val mse = 0.3699
Epoch 14: Train mse = 0.3036 | Val mse = 0.3503
Epoch 15: Train mse = 0.2936 | Val mse = 0.3342
Epoch 16: Train mse = 0.2747 | Val mse = 0.3589
Epoch 17: Train mse = 0.2680 | Val mse = 0.3381
Epoch 18: Train mse = 0.2593 | Val mse = 0.3469
Epoch 19: Train mse = 0.2537 | Val mse = 0.3219
Epoch 20: Train mse = 0.2487 | Val mse = 0.3262
Epoch 21: Train mse = 0.2448 | Val mse = 0.3167
Epoch 22: Train mse = 0.2394 | Val mse = 0.3207
Epoch 23: Train mse = 0.2362 | Val mse = 0.3344
Epoch 24: Train mse = 0.2309 | Val mse = 0.3465
Epoch 25: Train mse = 0.2236 | Val mse = 0.3455
Epoch 26: Train mse = 0.2214 | Val mse = 0.3325
Epoch 27: Train mse = 0.2163 | Val mse = 0.3406
Epoch 28: Train mse = 0.2152 | Val mse = 0.3578
Epoch 29: Train mse = 0.2127 | Val mse = 0.3443
Epoch 30: Train mse = 0.2078 | Val mse = 0.3574
Epoch 31: Train mse = 0.2034 | Val mse = 0.3573
Early stopping triggered at epoch 31.
Using device: mps
Epoch 1: Train mse = 1.0042 | Val mse = 1.2253
Epoch 2: Train mse = 0.9962 | Val mse = 1.2243
Epoch 3: Train mse = 0.9996 | Val mse = 1.2200
Epoch 4: Train mse = 0.9954 | Val mse = 1.2096
Epoch 5: Train mse = 0.9793 | Val mse = 1.1860
Epoch 6: Train mse = 0.9734 | Val mse = 1.1467
Epoch 7: Train mse = 0.9580 | Val mse = 1.0980
Epoch 8: Train mse = 0.9360 | Val mse = 1.0445
Epoch 9: Train mse = 0.9174 | Val mse = 0.9961
Epoch 10: Train mse = 0.9065 | Val mse = 0.9715
Epoch 11: Train mse = 0.8950 | Val mse = 0.9608
Epoch 12: Train mse = 0.8840 | Val mse = 0.9477
Epoch 13: Train mse = 0.8738 | Val mse = 0.9324
Epoch 14: Train mse = 0.8641 | Val mse = 0.9184
Epoch 15: Train mse = 0.8519 | Val mse = 0.9102
Epoch 16: Train mse = 0.8351 | Val mse = 0.8984
Epoch 17: Train mse = 0.8194 | Val mse = 0.8854
Epoch 18: Train mse = 0.8039 | Val mse = 0.8652
Epoch 19: Train mse = 0.7865 | Val mse = 0.8460
Epoch 20: Train mse = 0.7666 | Val mse = 0.8330
Epoch 21: Train mse = 0.7552 | Val mse = 0.8188
Epoch 22: Train mse = 0.7366 | Val mse = 0.8120
Epoch 23: Train mse = 0.7282 | Val mse = 0.8041
Epoch 24: Train mse = 0.7134 | Val mse = 0.7979
Epoch 25: Train mse = 0.7038 | Val mse = 0.7946
Epoch 26: Train mse = 0.6927 | Val mse = 0.7871
Epoch 27: Train mse = 0.6832 | Val mse = 0.7849
Epoch 28: Train mse = 0.6697 | Val mse = 0.7754
Epoch 29: Train mse = 0.6572 | Val mse = 0.7715
Epoch 30: Train mse = 0.6450 | Val mse = 0.7641
Epoch 31: Train mse = 0.6233 | Val mse = 0.7573
Epoch 32: Train mse = 0.6139 | Val mse = 0.7500
Epoch 33: Train mse = 0.5949 | Val mse = 0.7411
Epoch 34: Train mse = 0.5816 | Val mse = 0.7327
Epoch 35: Train mse = 0.5646 | Val mse = 0.7238
Epoch 36: Train mse = 0.5434 | Val mse = 0.7144
Epoch 37: Train mse = 0.5258 | Val mse = 0.7056
Epoch 38: Train mse = 0.5110 | Val mse = 0.6941
Epoch 39: Train mse = 0.4909 | Val mse = 0.6856
Epoch 40: Train mse = 0.4676 | Val mse = 0.6776
Epoch 41: Train mse = 0.4553 | Val mse = 0.6674
Epoch 42: Train mse = 0.4373 | Val mse = 0.6614
Epoch 43: Train mse = 0.4132 | Val mse = 0.6506
Epoch 44: Train mse = 0.3985 | Val mse = 0.6415
Epoch 45: Train mse = 0.3774 | Val mse = 0.6391
Epoch 46: Train mse = 0.3640 | Val mse = 0.6335
Epoch 47: Train mse = 0.3503 | Val mse = 0.6268
Epoch 48: Train mse = 0.3311 | Val mse = 0.6260
Epoch 49: Train mse = 0.3231 | Val mse = 0.6187
Epoch 50: Train mse = 0.3074 | Val mse = 0.6145
Using device: mps
Epoch 1: Train mse = 1.0042 | Val mse = 1.2252
Epoch 2: Train mse = 0.9963 | Val mse = 1.2244
Epoch 3: Train mse = 0.9998 | Val mse = 1.2205
Epoch 4: Train mse = 0.9961 | Val mse = 1.2115
Epoch 5: Train mse = 0.9806 | Val mse = 1.1902
Epoch 6: Train mse = 0.9753 | Val mse = 1.1512
Epoch 7: Train mse = 0.9596 | Val mse = 1.0985
Epoch 8: Train mse = 0.9363 | Val mse = 1.0408
Epoch 9: Train mse = 0.9165 | Val mse = 0.9916
Epoch 10: Train mse = 0.9056 | Val mse = 0.9685
Epoch 11: Train mse = 0.8945 | Val mse = 0.9598
Epoch 12: Train mse = 0.8842 | Val mse = 0.9480
Epoch 13: Train mse = 0.8747 | Val mse = 0.9332
Epoch 14: Train mse = 0.8659 | Val mse = 0.9202
Epoch 15: Train mse = 0.8556 | Val mse = 0.9129
Epoch 16: Train mse = 0.8407 | Val mse = 0.9030
Epoch 17: Train mse = 0.8273 | Val mse = 0.8923
Epoch 18: Train mse = 0.8132 | Val mse = 0.8731
Epoch 19: Train mse = 0.7955 | Val mse = 0.8530
Epoch 20: Train mse = 0.7741 | Val mse = 0.8389
Epoch 21: Train mse = 0.7609 | Val mse = 0.8236
Epoch 22: Train mse = 0.7412 | Val mse = 0.8154
Epoch 23: Train mse = 0.7325 | Val mse = 0.8065
Epoch 24: Train mse = 0.7176 | Val mse = 0.8003
Epoch 25: Train mse = 0.7084 | Val mse = 0.7983
Epoch 26: Train mse = 0.6985 | Val mse = 0.7911
Epoch 27: Train mse = 0.6893 | Val mse = 0.7905
Epoch 28: Train mse = 0.6771 | Val mse = 0.7814
Epoch 29: Train mse = 0.6653 | Val mse = 0.7779
Epoch 30: Train mse = 0.6551 | Val mse = 0.7720
Epoch 31: Train mse = 0.6349 | Val mse = 0.7673
Epoch 32: Train mse = 0.6268 | Val mse = 0.7595
Epoch 33: Train mse = 0.6086 | Val mse = 0.7525
Epoch 34: Train mse = 0.5942 | Val mse = 0.7441
Epoch 35: Train mse = 0.5753 | Val mse = 0.7348
Epoch 36: Train mse = 0.5509 | Val mse = 0.7270
Epoch 37: Train mse = 0.5309 | Val mse = 0.7169
Epoch 38: Train mse = 0.5128 | Val mse = 0.7087
Epoch 39: Train mse = 0.4906 | Val mse = 0.6956
Epoch 40: Train mse = 0.4689 | Val mse = 0.6851
Epoch 41: Train mse = 0.4558 | Val mse = 0.6704
Epoch 42: Train mse = 0.4413 | Val mse = 0.6645
Epoch 43: Train mse = 0.4186 | Val mse = 0.6484
Epoch 44: Train mse = 0.4081 | Val mse = 0.6369
Epoch 45: Train mse = 0.3888 | Val mse = 0.6308
Epoch 46: Train mse = 0.3773 | Val mse = 0.6259
Epoch 47: Train mse = 0.3655 | Val mse = 0.6161
Epoch 48: Train mse = 0.3495 | Val mse = 0.6079
Epoch 49: Train mse = 0.3421 | Val mse = 0.6020
Epoch 50: Train mse = 0.3262 | Val mse = 0.5966
Using device: mps
Epoch 1: Train mse = 0.9950 | Val mse = 1.1095
Epoch 2: Train mse = 0.9179 | Val mse = 0.9480
Epoch 3: Train mse = 0.8571 | Val mse = 0.8984
Epoch 4: Train mse = 0.7745 | Val mse = 0.7978
Epoch 5: Train mse = 0.7361 | Val mse = 0.8109
Epoch 6: Train mse = 0.6988 | Val mse = 0.7795
Epoch 7: Train mse = 0.6378 | Val mse = 0.7459
Epoch 8: Train mse = 0.5500 | Val mse = 0.6649
Epoch 9: Train mse = 0.4716 | Val mse = 0.5968
Epoch 10: Train mse = 0.4013 | Val mse = 0.5028
Epoch 11: Train mse = 0.3427 | Val mse = 0.4719
Epoch 12: Train mse = 0.3139 | Val mse = 0.4313
Epoch 13: Train mse = 0.2980 | Val mse = 0.4001
Epoch 14: Train mse = 0.2863 | Val mse = 0.4160
Epoch 15: Train mse = 0.2813 | Val mse = 0.3893
Epoch 16: Train mse = 0.2789 | Val mse = 0.4276
Epoch 17: Train mse = 0.2720 | Val mse = 0.3727
Epoch 18: Train mse = 0.2588 | Val mse = 0.3894
Epoch 19: Train mse = 0.2468 | Val mse = 0.3996
Epoch 20: Train mse = 0.2369 | Val mse = 0.3639
Epoch 21: Train mse = 0.2280 | Val mse = 0.3668
Epoch 22: Train mse = 0.2235 | Val mse = 0.3745
Epoch 23: Train mse = 0.2206 | Val mse = 0.3728
Epoch 24: Train mse = 0.2092 | Val mse = 0.3879
Epoch 25: Train mse = 0.2044 | Val mse = 0.3712
Epoch 26: Train mse = 0.1970 | Val mse = 0.3760
Epoch 27: Train mse = 0.1944 | Val mse = 0.3875
Epoch 28: Train mse = 0.1884 | Val mse = 0.3818
Epoch 29: Train mse = 0.1847 | Val mse = 0.3859
Epoch 30: Train mse = 0.1815 | Val mse = 0.3938
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9956 | Val mse = 1.1108
Epoch 2: Train mse = 0.9177 | Val mse = 0.9536
Epoch 3: Train mse = 0.8616 | Val mse = 0.9026
Epoch 4: Train mse = 0.7769 | Val mse = 0.7976
Epoch 5: Train mse = 0.7398 | Val mse = 0.8092
Epoch 6: Train mse = 0.7040 | Val mse = 0.7838
Epoch 7: Train mse = 0.6496 | Val mse = 0.7480
Epoch 8: Train mse = 0.5691 | Val mse = 0.6640
Epoch 9: Train mse = 0.4840 | Val mse = 0.5972
Epoch 10: Train mse = 0.4067 | Val mse = 0.5006
Epoch 11: Train mse = 0.3492 | Val mse = 0.4449
Epoch 12: Train mse = 0.3117 | Val mse = 0.4251
Epoch 13: Train mse = 0.2975 | Val mse = 0.3779
Epoch 14: Train mse = 0.2815 | Val mse = 0.3813
Epoch 15: Train mse = 0.2730 | Val mse = 0.3524
Epoch 16: Train mse = 0.2588 | Val mse = 0.3724
Epoch 17: Train mse = 0.2526 | Val mse = 0.3579
Epoch 18: Train mse = 0.2391 | Val mse = 0.3488
Epoch 19: Train mse = 0.2336 | Val mse = 0.3599
Epoch 20: Train mse = 0.2254 | Val mse = 0.3500
Epoch 21: Train mse = 0.2177 | Val mse = 0.3710
Epoch 22: Train mse = 0.2137 | Val mse = 0.3685
Epoch 23: Train mse = 0.2150 | Val mse = 0.3600
Epoch 24: Train mse = 0.2037 | Val mse = 0.3696
Epoch 25: Train mse = 0.2016 | Val mse = 0.3672
Epoch 26: Train mse = 0.1958 | Val mse = 0.3565
Epoch 27: Train mse = 0.1927 | Val mse = 0.3697
Epoch 28: Train mse = 0.1872 | Val mse = 0.3704
Epoch 29: Train mse = 0.1842 | Val mse = 0.3618
Epoch 30: Train mse = 0.1820 | Val mse = 0.3647
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 1.0012 | Val mse = 1.2192
Epoch 2: Train mse = 0.9915 | Val mse = 1.2008
Epoch 3: Train mse = 0.9809 | Val mse = 1.1677
Epoch 4: Train mse = 0.9543 | Val mse = 1.1097
Epoch 5: Train mse = 0.9311 | Val mse = 1.0348
Epoch 6: Train mse = 0.9117 | Val mse = 0.9799
Epoch 7: Train mse = 0.8989 | Val mse = 0.9519
Epoch 8: Train mse = 0.8723 | Val mse = 0.9308
Epoch 9: Train mse = 0.8520 | Val mse = 0.9020
Epoch 10: Train mse = 0.8311 | Val mse = 0.8659
Epoch 11: Train mse = 0.8073 | Val mse = 0.8298
Epoch 12: Train mse = 0.7741 | Val mse = 0.8058
Epoch 13: Train mse = 0.7509 | Val mse = 0.7848
Epoch 14: Train mse = 0.7236 | Val mse = 0.7598
Epoch 15: Train mse = 0.6911 | Val mse = 0.7319
Epoch 16: Train mse = 0.6645 | Val mse = 0.7028
Epoch 17: Train mse = 0.6289 | Val mse = 0.6764
Epoch 18: Train mse = 0.5988 | Val mse = 0.6532
Epoch 19: Train mse = 0.5767 | Val mse = 0.6284
Epoch 20: Train mse = 0.5462 | Val mse = 0.6065
Epoch 21: Train mse = 0.5171 | Val mse = 0.5840
Epoch 22: Train mse = 0.4943 | Val mse = 0.5574
Epoch 23: Train mse = 0.4714 | Val mse = 0.5333
Epoch 24: Train mse = 0.4479 | Val mse = 0.5113
Epoch 25: Train mse = 0.4203 | Val mse = 0.4867
Epoch 26: Train mse = 0.3972 | Val mse = 0.4677
Epoch 27: Train mse = 0.3841 | Val mse = 0.4524
Epoch 28: Train mse = 0.3665 | Val mse = 0.4327
Epoch 29: Train mse = 0.3554 | Val mse = 0.4291
Epoch 30: Train mse = 0.3412 | Val mse = 0.4162
Epoch 31: Train mse = 0.3336 | Val mse = 0.4051
Epoch 32: Train mse = 0.3238 | Val mse = 0.3997
Epoch 33: Train mse = 0.3186 | Val mse = 0.3926
Epoch 34: Train mse = 0.3071 | Val mse = 0.3869
Epoch 35: Train mse = 0.3041 | Val mse = 0.3833
Epoch 36: Train mse = 0.3010 | Val mse = 0.3786
Epoch 37: Train mse = 0.2977 | Val mse = 0.3742
Epoch 38: Train mse = 0.2974 | Val mse = 0.3750
Epoch 39: Train mse = 0.2875 | Val mse = 0.3695
Epoch 40: Train mse = 0.2842 | Val mse = 0.3676
Epoch 41: Train mse = 0.2801 | Val mse = 0.3625
Epoch 42: Train mse = 0.2764 | Val mse = 0.3630
Epoch 43: Train mse = 0.2724 | Val mse = 0.3622
Epoch 44: Train mse = 0.2729 | Val mse = 0.3590
Epoch 45: Train mse = 0.2688 | Val mse = 0.3579
Epoch 46: Train mse = 0.2670 | Val mse = 0.3553
Epoch 47: Train mse = 0.2587 | Val mse = 0.3541
Epoch 48: Train mse = 0.2588 | Val mse = 0.3546
Epoch 49: Train mse = 0.2568 | Val mse = 0.3517
Epoch 50: Train mse = 0.2547 | Val mse = 0.3513
Using device: mps
Epoch 1: Train mse = 1.0013 | Val mse = 1.2192
Epoch 2: Train mse = 0.9919 | Val mse = 1.2011
Epoch 3: Train mse = 0.9816 | Val mse = 1.1690
Epoch 4: Train mse = 0.9554 | Val mse = 1.1122
Epoch 5: Train mse = 0.9324 | Val mse = 1.0380
Epoch 6: Train mse = 0.9131 | Val mse = 0.9835
Epoch 7: Train mse = 0.9006 | Val mse = 0.9562
Epoch 8: Train mse = 0.8745 | Val mse = 0.9351
Epoch 9: Train mse = 0.8553 | Val mse = 0.9059
Epoch 10: Train mse = 0.8357 | Val mse = 0.8695
Epoch 11: Train mse = 0.8132 | Val mse = 0.8335
Epoch 12: Train mse = 0.7807 | Val mse = 0.8088
Epoch 13: Train mse = 0.7580 | Val mse = 0.7873
Epoch 14: Train mse = 0.7307 | Val mse = 0.7622
Epoch 15: Train mse = 0.6985 | Val mse = 0.7347
Epoch 16: Train mse = 0.6727 | Val mse = 0.7060
Epoch 17: Train mse = 0.6380 | Val mse = 0.6804
Epoch 18: Train mse = 0.6084 | Val mse = 0.6583
Epoch 19: Train mse = 0.5868 | Val mse = 0.6342
Epoch 20: Train mse = 0.5569 | Val mse = 0.6124
Epoch 21: Train mse = 0.5284 | Val mse = 0.5898
Epoch 22: Train mse = 0.5064 | Val mse = 0.5629
Epoch 23: Train mse = 0.4836 | Val mse = 0.5386
Epoch 24: Train mse = 0.4604 | Val mse = 0.5162
Epoch 25: Train mse = 0.4323 | Val mse = 0.4911
Epoch 26: Train mse = 0.4087 | Val mse = 0.4714
Epoch 27: Train mse = 0.3950 | Val mse = 0.4550
Epoch 28: Train mse = 0.3765 | Val mse = 0.4344
Epoch 29: Train mse = 0.3647 | Val mse = 0.4301
Epoch 30: Train mse = 0.3492 | Val mse = 0.4163
Epoch 31: Train mse = 0.3408 | Val mse = 0.4042
Epoch 32: Train mse = 0.3302 | Val mse = 0.3981
Epoch 33: Train mse = 0.3243 | Val mse = 0.3905
Epoch 34: Train mse = 0.3124 | Val mse = 0.3846
Epoch 35: Train mse = 0.3094 | Val mse = 0.3808
Epoch 36: Train mse = 0.3062 | Val mse = 0.3759
Epoch 37: Train mse = 0.3030 | Val mse = 0.3708
Epoch 38: Train mse = 0.3028 | Val mse = 0.3717
Epoch 39: Train mse = 0.2928 | Val mse = 0.3658
Epoch 40: Train mse = 0.2897 | Val mse = 0.3639
Epoch 41: Train mse = 0.2856 | Val mse = 0.3588
Epoch 42: Train mse = 0.2818 | Val mse = 0.3591
Epoch 43: Train mse = 0.2778 | Val mse = 0.3579
Epoch 44: Train mse = 0.2788 | Val mse = 0.3547
Epoch 45: Train mse = 0.2746 | Val mse = 0.3536
Epoch 46: Train mse = 0.2731 | Val mse = 0.3505
Epoch 47: Train mse = 0.2647 | Val mse = 0.3489
Epoch 48: Train mse = 0.2650 | Val mse = 0.3497
Epoch 49: Train mse = 0.2630 | Val mse = 0.3465
Epoch 50: Train mse = 0.2610 | Val mse = 0.3457
Using device: mps
Epoch 1: Train mse = 0.9624 | Val mse = 0.9216
Epoch 2: Train mse = 0.8226 | Val mse = 0.7913
Epoch 3: Train mse = 0.6864 | Val mse = 0.6668
Epoch 4: Train mse = 0.5513 | Val mse = 0.5875
Epoch 5: Train mse = 0.4435 | Val mse = 0.4673
Epoch 6: Train mse = 0.3682 | Val mse = 0.4035
Epoch 7: Train mse = 0.3283 | Val mse = 0.3683
Epoch 8: Train mse = 0.2967 | Val mse = 0.3531
Epoch 9: Train mse = 0.2806 | Val mse = 0.3451
Epoch 10: Train mse = 0.2651 | Val mse = 0.3320
Epoch 11: Train mse = 0.2535 | Val mse = 0.3478
Epoch 12: Train mse = 0.2438 | Val mse = 0.3488
Epoch 13: Train mse = 0.2361 | Val mse = 0.3471
Epoch 14: Train mse = 0.2245 | Val mse = 0.3472
Epoch 15: Train mse = 0.2205 | Val mse = 0.3639
Epoch 16: Train mse = 0.2127 | Val mse = 0.3621
Epoch 17: Train mse = 0.2042 | Val mse = 0.3720
Epoch 18: Train mse = 0.1988 | Val mse = 0.3727
Epoch 19: Train mse = 0.1915 | Val mse = 0.3981
Epoch 20: Train mse = 0.1870 | Val mse = 0.3898
Epoch 21: Train mse = 0.1797 | Val mse = 0.3911
Epoch 22: Train mse = 0.1775 | Val mse = 0.3903
Epoch 23: Train mse = 0.1728 | Val mse = 0.4091
Epoch 24: Train mse = 0.1679 | Val mse = 0.3931
Epoch 25: Train mse = 0.1596 | Val mse = 0.3979
Epoch 26: Train mse = 0.1528 | Val mse = 0.4080
Epoch 27: Train mse = 0.1516 | Val mse = 0.4086
Epoch 28: Train mse = 0.1458 | Val mse = 0.4118
Epoch 29: Train mse = 0.1426 | Val mse = 0.4090
Epoch 30: Train mse = 0.1375 | Val mse = 0.4209
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9635 | Val mse = 0.9267
Epoch 2: Train mse = 0.8269 | Val mse = 0.7940
Epoch 3: Train mse = 0.6933 | Val mse = 0.6728
Epoch 4: Train mse = 0.5617 | Val mse = 0.5967
Epoch 5: Train mse = 0.4551 | Val mse = 0.4740
Epoch 6: Train mse = 0.3772 | Val mse = 0.4059
Epoch 7: Train mse = 0.3327 | Val mse = 0.3681
Epoch 8: Train mse = 0.3015 | Val mse = 0.3503
Epoch 9: Train mse = 0.2846 | Val mse = 0.3389
Epoch 10: Train mse = 0.2689 | Val mse = 0.3303
Epoch 11: Train mse = 0.2582 | Val mse = 0.3465
Epoch 12: Train mse = 0.2497 | Val mse = 0.3487
Epoch 13: Train mse = 0.2419 | Val mse = 0.3415
Epoch 14: Train mse = 0.2303 | Val mse = 0.3462
Epoch 15: Train mse = 0.2262 | Val mse = 0.3574
Epoch 16: Train mse = 0.2181 | Val mse = 0.3568
Epoch 17: Train mse = 0.2104 | Val mse = 0.3621
Epoch 18: Train mse = 0.2043 | Val mse = 0.3706
Epoch 19: Train mse = 0.1987 | Val mse = 0.3904
Epoch 20: Train mse = 0.1937 | Val mse = 0.3830
Epoch 21: Train mse = 0.1874 | Val mse = 0.3913
Epoch 22: Train mse = 0.1855 | Val mse = 0.3925
Epoch 23: Train mse = 0.1817 | Val mse = 0.4017
Epoch 24: Train mse = 0.1766 | Val mse = 0.3922
Epoch 25: Train mse = 0.1690 | Val mse = 0.3979
Epoch 26: Train mse = 0.1624 | Val mse = 0.4017
Epoch 27: Train mse = 0.1608 | Val mse = 0.4134
Epoch 28: Train mse = 0.1554 | Val mse = 0.4079
Epoch 29: Train mse = 0.1520 | Val mse = 0.4076
Epoch 30: Train mse = 0.1465 | Val mse = 0.4128
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9927 | Val mse = 1.2175
Epoch 2: Train mse = 0.9853 | Val mse = 1.1984
Epoch 3: Train mse = 0.9728 | Val mse = 1.1593
Epoch 4: Train mse = 0.9517 | Val mse = 1.0927
Epoch 5: Train mse = 0.9245 | Val mse = 1.0070
Epoch 6: Train mse = 0.9067 | Val mse = 0.9420
Epoch 7: Train mse = 0.8813 | Val mse = 0.9190
Epoch 8: Train mse = 0.8721 | Val mse = 0.9085
Epoch 9: Train mse = 0.8464 | Val mse = 0.8949
Epoch 10: Train mse = 0.8268 | Val mse = 0.8656
Epoch 11: Train mse = 0.7954 | Val mse = 0.8289
Epoch 12: Train mse = 0.7659 | Val mse = 0.7990
Epoch 13: Train mse = 0.7397 | Val mse = 0.7745
Epoch 14: Train mse = 0.7038 | Val mse = 0.7476
Epoch 15: Train mse = 0.6773 | Val mse = 0.7154
Epoch 16: Train mse = 0.6356 | Val mse = 0.6836
Epoch 17: Train mse = 0.6061 | Val mse = 0.6580
Epoch 18: Train mse = 0.5701 | Val mse = 0.6354
Epoch 19: Train mse = 0.5346 | Val mse = 0.6117
Epoch 20: Train mse = 0.5109 | Val mse = 0.5900
Epoch 21: Train mse = 0.4777 | Val mse = 0.5686
Epoch 22: Train mse = 0.4483 | Val mse = 0.5440
Epoch 23: Train mse = 0.4212 | Val mse = 0.5215
Epoch 24: Train mse = 0.3998 | Val mse = 0.5005
Epoch 25: Train mse = 0.3796 | Val mse = 0.4770
Epoch 26: Train mse = 0.3566 | Val mse = 0.4631
Epoch 27: Train mse = 0.3337 | Val mse = 0.4474
Epoch 28: Train mse = 0.3243 | Val mse = 0.4340
Epoch 29: Train mse = 0.3114 | Val mse = 0.4256
Epoch 30: Train mse = 0.3009 | Val mse = 0.4163
Epoch 31: Train mse = 0.2945 | Val mse = 0.4088
Epoch 32: Train mse = 0.2881 | Val mse = 0.4055
Epoch 33: Train mse = 0.2804 | Val mse = 0.4003
Epoch 34: Train mse = 0.2731 | Val mse = 0.3961
Epoch 35: Train mse = 0.2642 | Val mse = 0.3943
Epoch 36: Train mse = 0.2630 | Val mse = 0.3904
Epoch 37: Train mse = 0.2548 | Val mse = 0.3873
Epoch 38: Train mse = 0.2498 | Val mse = 0.3855
Epoch 39: Train mse = 0.2457 | Val mse = 0.3845
Epoch 40: Train mse = 0.2415 | Val mse = 0.3849
Epoch 41: Train mse = 0.2325 | Val mse = 0.3824
Epoch 42: Train mse = 0.2311 | Val mse = 0.3796
Epoch 43: Train mse = 0.2273 | Val mse = 0.3806
Epoch 44: Train mse = 0.2250 | Val mse = 0.3792
Epoch 45: Train mse = 0.2201 | Val mse = 0.3800
Epoch 46: Train mse = 0.2144 | Val mse = 0.3809
Epoch 47: Train mse = 0.2141 | Val mse = 0.3792
Epoch 48: Train mse = 0.2117 | Val mse = 0.3819
Epoch 49: Train mse = 0.2047 | Val mse = 0.3814
Epoch 50: Train mse = 0.2035 | Val mse = 0.3813
Using device: mps
Epoch 1: Train mse = 0.9928 | Val mse = 1.2177
Epoch 2: Train mse = 0.9858 | Val mse = 1.1994
Epoch 3: Train mse = 0.9735 | Val mse = 1.1614
Epoch 4: Train mse = 0.9526 | Val mse = 1.0952
Epoch 5: Train mse = 0.9252 | Val mse = 1.0084
Epoch 6: Train mse = 0.9074 | Val mse = 0.9429
Epoch 7: Train mse = 0.8820 | Val mse = 0.9205
Epoch 8: Train mse = 0.8731 | Val mse = 0.9103
Epoch 9: Train mse = 0.8479 | Val mse = 0.8962
Epoch 10: Train mse = 0.8285 | Val mse = 0.8662
Epoch 11: Train mse = 0.7972 | Val mse = 0.8291
Epoch 12: Train mse = 0.7676 | Val mse = 0.7995
Epoch 13: Train mse = 0.7418 | Val mse = 0.7753
Epoch 14: Train mse = 0.7064 | Val mse = 0.7482
Epoch 15: Train mse = 0.6801 | Val mse = 0.7157
Epoch 16: Train mse = 0.6381 | Val mse = 0.6835
Epoch 17: Train mse = 0.6084 | Val mse = 0.6577
Epoch 18: Train mse = 0.5729 | Val mse = 0.6351
Epoch 19: Train mse = 0.5382 | Val mse = 0.6112
Epoch 20: Train mse = 0.5149 | Val mse = 0.5894
Epoch 21: Train mse = 0.4824 | Val mse = 0.5680
Epoch 22: Train mse = 0.4530 | Val mse = 0.5429
Epoch 23: Train mse = 0.4264 | Val mse = 0.5196
Epoch 24: Train mse = 0.4050 | Val mse = 0.4983
Epoch 25: Train mse = 0.3849 | Val mse = 0.4731
Epoch 26: Train mse = 0.3620 | Val mse = 0.4588
Epoch 27: Train mse = 0.3388 | Val mse = 0.4433
Epoch 28: Train mse = 0.3298 | Val mse = 0.4291
Epoch 29: Train mse = 0.3172 | Val mse = 0.4208
Epoch 30: Train mse = 0.3069 | Val mse = 0.4119
Epoch 31: Train mse = 0.3008 | Val mse = 0.4042
Epoch 32: Train mse = 0.2949 | Val mse = 0.4008
Epoch 33: Train mse = 0.2876 | Val mse = 0.3960
Epoch 34: Train mse = 0.2806 | Val mse = 0.3913
Epoch 35: Train mse = 0.2719 | Val mse = 0.3898
Epoch 36: Train mse = 0.2712 | Val mse = 0.3846
Epoch 37: Train mse = 0.2630 | Val mse = 0.3820
Epoch 38: Train mse = 0.2583 | Val mse = 0.3782
Epoch 39: Train mse = 0.2550 | Val mse = 0.3776
Epoch 40: Train mse = 0.2510 | Val mse = 0.3760
Epoch 41: Train mse = 0.2418 | Val mse = 0.3760
Epoch 42: Train mse = 0.2410 | Val mse = 0.3704
Epoch 43: Train mse = 0.2370 | Val mse = 0.3719
Epoch 44: Train mse = 0.2354 | Val mse = 0.3686
Epoch 45: Train mse = 0.2304 | Val mse = 0.3700
Epoch 46: Train mse = 0.2239 | Val mse = 0.3703
Epoch 47: Train mse = 0.2241 | Val mse = 0.3679
Epoch 48: Train mse = 0.2212 | Val mse = 0.3709
Epoch 49: Train mse = 0.2137 | Val mse = 0.3697
Epoch 50: Train mse = 0.2128 | Val mse = 0.3706
Using device: mps
Epoch 1: Train mse = 0.9518 | Val mse = 0.8928
Epoch 2: Train mse = 0.8219 | Val mse = 0.8095
Epoch 3: Train mse = 0.6658 | Val mse = 0.6459
Epoch 4: Train mse = 0.5235 | Val mse = 0.5363
Epoch 5: Train mse = 0.4001 | Val mse = 0.4471
Epoch 6: Train mse = 0.3464 | Val mse = 0.3974
Epoch 7: Train mse = 0.3090 | Val mse = 0.3873
Epoch 8: Train mse = 0.2810 | Val mse = 0.3796
Epoch 9: Train mse = 0.2581 | Val mse = 0.3562
Epoch 10: Train mse = 0.2409 | Val mse = 0.3584
Epoch 11: Train mse = 0.2247 | Val mse = 0.3580
Epoch 12: Train mse = 0.2095 | Val mse = 0.3567
Epoch 13: Train mse = 0.1999 | Val mse = 0.3575
Epoch 14: Train mse = 0.1879 | Val mse = 0.3655
Epoch 15: Train mse = 0.1823 | Val mse = 0.3669
Epoch 16: Train mse = 0.1724 | Val mse = 0.3635
Epoch 17: Train mse = 0.1655 | Val mse = 0.3692
Epoch 18: Train mse = 0.1600 | Val mse = 0.3770
Epoch 19: Train mse = 0.1515 | Val mse = 0.3797
Epoch 20: Train mse = 0.1445 | Val mse = 0.3762
Epoch 21: Train mse = 0.1377 | Val mse = 0.3821
Epoch 22: Train mse = 0.1289 | Val mse = 0.3875
Epoch 23: Train mse = 0.1233 | Val mse = 0.3852
Epoch 24: Train mse = 0.1176 | Val mse = 0.3922
Epoch 25: Train mse = 0.1135 | Val mse = 0.3986
Epoch 26: Train mse = 0.1104 | Val mse = 0.4166
Epoch 27: Train mse = 0.1077 | Val mse = 0.4142
Epoch 28: Train mse = 0.1048 | Val mse = 0.4169
Epoch 29: Train mse = 0.1047 | Val mse = 0.4146
Epoch 30: Train mse = 0.1021 | Val mse = 0.4268
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9527 | Val mse = 0.8959
Epoch 2: Train mse = 0.8236 | Val mse = 0.8103
Epoch 3: Train mse = 0.6698 | Val mse = 0.6513
Epoch 4: Train mse = 0.5301 | Val mse = 0.5418
Epoch 5: Train mse = 0.4072 | Val mse = 0.4557
Epoch 6: Train mse = 0.3505 | Val mse = 0.3962
Epoch 7: Train mse = 0.3143 | Val mse = 0.3836
Epoch 8: Train mse = 0.2853 | Val mse = 0.3745
Epoch 9: Train mse = 0.2642 | Val mse = 0.3488
Epoch 10: Train mse = 0.2475 | Val mse = 0.3508
Epoch 11: Train mse = 0.2316 | Val mse = 0.3517
Epoch 12: Train mse = 0.2157 | Val mse = 0.3501
Epoch 13: Train mse = 0.2059 | Val mse = 0.3485
Epoch 14: Train mse = 0.1938 | Val mse = 0.3538
Epoch 15: Train mse = 0.1876 | Val mse = 0.3550
Epoch 16: Train mse = 0.1767 | Val mse = 0.3675
Epoch 17: Train mse = 0.1697 | Val mse = 0.3643
Epoch 18: Train mse = 0.1641 | Val mse = 0.3729
Epoch 19: Train mse = 0.1561 | Val mse = 0.3754
Epoch 20: Train mse = 0.1503 | Val mse = 0.3681
Epoch 21: Train mse = 0.1439 | Val mse = 0.3887
Epoch 22: Train mse = 0.1366 | Val mse = 0.3891
Epoch 23: Train mse = 0.1323 | Val mse = 0.3830
Epoch 24: Train mse = 0.1244 | Val mse = 0.4009
Epoch 25: Train mse = 0.1196 | Val mse = 0.3922
Epoch 26: Train mse = 0.1140 | Val mse = 0.4062
Epoch 27: Train mse = 0.1095 | Val mse = 0.4115
Epoch 28: Train mse = 0.1053 | Val mse = 0.4065
Epoch 29: Train mse = 0.1031 | Val mse = 0.4101
Epoch 30: Train mse = 0.0992 | Val mse = 0.4156
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9936 | Val mse = 1.2283
Epoch 2: Train mse = 0.9982 | Val mse = 1.2186
Epoch 3: Train mse = 0.9889 | Val mse = 1.1945
Epoch 4: Train mse = 0.9754 | Val mse = 1.1352
Epoch 5: Train mse = 0.9427 | Val mse = 1.0310
Epoch 6: Train mse = 0.9151 | Val mse = 0.9591
Epoch 7: Train mse = 0.8997 | Val mse = 0.9516
Epoch 8: Train mse = 0.8907 | Val mse = 0.9536
Epoch 9: Train mse = 0.8797 | Val mse = 0.9493
Epoch 10: Train mse = 0.8693 | Val mse = 0.9431
Epoch 11: Train mse = 0.8649 | Val mse = 0.9383
Epoch 12: Train mse = 0.8560 | Val mse = 0.9285
Epoch 13: Train mse = 0.8502 | Val mse = 0.9227
Epoch 14: Train mse = 0.8482 | Val mse = 0.9166
Epoch 15: Train mse = 0.8393 | Val mse = 0.9012
Epoch 16: Train mse = 0.8234 | Val mse = 0.8818
Epoch 17: Train mse = 0.7981 | Val mse = 0.8554
Epoch 18: Train mse = 0.7651 | Val mse = 0.8176
Epoch 19: Train mse = 0.7409 | Val mse = 0.7946
Epoch 20: Train mse = 0.7263 | Val mse = 0.7859
Epoch 21: Train mse = 0.7165 | Val mse = 0.7875
Epoch 22: Train mse = 0.6963 | Val mse = 0.7774
Epoch 23: Train mse = 0.6756 | Val mse = 0.7477
Epoch 24: Train mse = 0.6490 | Val mse = 0.7153
Epoch 25: Train mse = 0.6175 | Val mse = 0.6792
Epoch 26: Train mse = 0.5809 | Val mse = 0.6465
Epoch 27: Train mse = 0.5449 | Val mse = 0.6219
Epoch 28: Train mse = 0.5169 | Val mse = 0.6003
Epoch 29: Train mse = 0.4917 | Val mse = 0.5828
Epoch 30: Train mse = 0.4577 | Val mse = 0.5568
Epoch 31: Train mse = 0.4279 | Val mse = 0.5254
Epoch 32: Train mse = 0.4009 | Val mse = 0.4989
Epoch 33: Train mse = 0.3717 | Val mse = 0.4672
Epoch 34: Train mse = 0.3540 | Val mse = 0.4575
Epoch 35: Train mse = 0.3344 | Val mse = 0.4315
Epoch 36: Train mse = 0.3226 | Val mse = 0.4204
Epoch 37: Train mse = 0.3081 | Val mse = 0.4034
Epoch 38: Train mse = 0.3005 | Val mse = 0.3943
Epoch 39: Train mse = 0.2926 | Val mse = 0.3988
Epoch 40: Train mse = 0.2845 | Val mse = 0.3819
Epoch 41: Train mse = 0.2802 | Val mse = 0.3831
Epoch 42: Train mse = 0.2788 | Val mse = 0.3785
Epoch 43: Train mse = 0.2714 | Val mse = 0.3722
Epoch 44: Train mse = 0.2695 | Val mse = 0.3635
Epoch 45: Train mse = 0.2645 | Val mse = 0.3684
Epoch 46: Train mse = 0.2607 | Val mse = 0.3603
Epoch 47: Train mse = 0.2571 | Val mse = 0.3600
Epoch 48: Train mse = 0.2550 | Val mse = 0.3570
Epoch 49: Train mse = 0.2540 | Val mse = 0.3572
Epoch 50: Train mse = 0.2472 | Val mse = 0.3603
Using device: mps
Epoch 1: Train mse = 0.9936 | Val mse = 1.2284
Epoch 2: Train mse = 0.9983 | Val mse = 1.2189
Epoch 3: Train mse = 0.9895 | Val mse = 1.1963
Epoch 4: Train mse = 0.9770 | Val mse = 1.1401
Epoch 5: Train mse = 0.9448 | Val mse = 1.0356
Epoch 6: Train mse = 0.9163 | Val mse = 0.9591
Epoch 7: Train mse = 0.9005 | Val mse = 0.9510
Epoch 8: Train mse = 0.8914 | Val mse = 0.9532
Epoch 9: Train mse = 0.8803 | Val mse = 0.9492
Epoch 10: Train mse = 0.8706 | Val mse = 0.9438
Epoch 11: Train mse = 0.8668 | Val mse = 0.9401
Epoch 12: Train mse = 0.8581 | Val mse = 0.9306
Epoch 13: Train mse = 0.8524 | Val mse = 0.9236
Epoch 14: Train mse = 0.8509 | Val mse = 0.9184
Epoch 15: Train mse = 0.8434 | Val mse = 0.9049
Epoch 16: Train mse = 0.8302 | Val mse = 0.8874
Epoch 17: Train mse = 0.8087 | Val mse = 0.8638
Epoch 18: Train mse = 0.7778 | Val mse = 0.8264
Epoch 19: Train mse = 0.7495 | Val mse = 0.7995
Epoch 20: Train mse = 0.7328 | Val mse = 0.7842
Epoch 21: Train mse = 0.7235 | Val mse = 0.7859
Epoch 22: Train mse = 0.7047 | Val mse = 0.7813
Epoch 23: Train mse = 0.6861 | Val mse = 0.7559
Epoch 24: Train mse = 0.6630 | Val mse = 0.7253
Epoch 25: Train mse = 0.6345 | Val mse = 0.6896
Epoch 26: Train mse = 0.5969 | Val mse = 0.6537
Epoch 27: Train mse = 0.5600 | Val mse = 0.6282
Epoch 28: Train mse = 0.5326 | Val mse = 0.6032
Epoch 29: Train mse = 0.5072 | Val mse = 0.5868
Epoch 30: Train mse = 0.4738 | Val mse = 0.5629
Epoch 31: Train mse = 0.4442 | Val mse = 0.5332
Epoch 32: Train mse = 0.4167 | Val mse = 0.5046
Epoch 33: Train mse = 0.3864 | Val mse = 0.4729
Epoch 34: Train mse = 0.3674 | Val mse = 0.4603
Epoch 35: Train mse = 0.3462 | Val mse = 0.4349
Epoch 36: Train mse = 0.3328 | Val mse = 0.4209
Epoch 37: Train mse = 0.3175 | Val mse = 0.4047
Epoch 38: Train mse = 0.3085 | Val mse = 0.3924
Epoch 39: Train mse = 0.3003 | Val mse = 0.3966
Epoch 40: Train mse = 0.2920 | Val mse = 0.3796
Epoch 41: Train mse = 0.2872 | Val mse = 0.3804
Epoch 42: Train mse = 0.2859 | Val mse = 0.3769
Epoch 43: Train mse = 0.2780 | Val mse = 0.3703
Epoch 44: Train mse = 0.2763 | Val mse = 0.3609
Epoch 45: Train mse = 0.2709 | Val mse = 0.3672
Epoch 46: Train mse = 0.2672 | Val mse = 0.3560
Epoch 47: Train mse = 0.2637 | Val mse = 0.3586
Epoch 48: Train mse = 0.2614 | Val mse = 0.3530
Epoch 49: Train mse = 0.2609 | Val mse = 0.3527
Epoch 50: Train mse = 0.2539 | Val mse = 0.3566
Using device: mps
Epoch 1: Train mse = 0.9670 | Val mse = 0.9504
Epoch 2: Train mse = 0.8856 | Val mse = 0.9804
Epoch 3: Train mse = 0.8273 | Val mse = 0.8402
Epoch 4: Train mse = 0.7315 | Val mse = 0.7376
Epoch 5: Train mse = 0.6258 | Val mse = 0.6752
Epoch 6: Train mse = 0.5374 | Val mse = 0.5965
Epoch 7: Train mse = 0.4317 | Val mse = 0.4662
Epoch 8: Train mse = 0.3520 | Val mse = 0.4094
Epoch 9: Train mse = 0.3169 | Val mse = 0.3744
Epoch 10: Train mse = 0.2972 | Val mse = 0.3555
Epoch 11: Train mse = 0.2862 | Val mse = 0.3700
Epoch 12: Train mse = 0.2736 | Val mse = 0.3342
Epoch 13: Train mse = 0.2781 | Val mse = 0.3442
Epoch 14: Train mse = 0.2704 | Val mse = 0.3453
Epoch 15: Train mse = 0.2632 | Val mse = 0.3937
Epoch 16: Train mse = 0.2617 | Val mse = 0.3356
Epoch 17: Train mse = 0.2506 | Val mse = 0.3289
Epoch 18: Train mse = 0.2408 | Val mse = 0.3314
Epoch 19: Train mse = 0.2354 | Val mse = 0.3346
Epoch 20: Train mse = 0.2262 | Val mse = 0.3326
Epoch 21: Train mse = 0.2250 | Val mse = 0.3349
Epoch 22: Train mse = 0.2159 | Val mse = 0.3452
Epoch 23: Train mse = 0.2158 | Val mse = 0.3500
Epoch 24: Train mse = 0.2106 | Val mse = 0.3347
Epoch 25: Train mse = 0.2072 | Val mse = 0.3418
Epoch 26: Train mse = 0.2035 | Val mse = 0.3506
Epoch 27: Train mse = 0.2000 | Val mse = 0.3450
Epoch 28: Train mse = 0.1964 | Val mse = 0.3603
Epoch 29: Train mse = 0.1986 | Val mse = 0.3568
Epoch 30: Train mse = 0.1941 | Val mse = 0.3545
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9676 | Val mse = 0.9500
Epoch 2: Train mse = 0.8866 | Val mse = 0.9816
Epoch 3: Train mse = 0.8365 | Val mse = 0.8529
Epoch 4: Train mse = 0.7412 | Val mse = 0.7380
Epoch 5: Train mse = 0.6355 | Val mse = 0.6927
Epoch 6: Train mse = 0.5522 | Val mse = 0.6008
Epoch 7: Train mse = 0.4499 | Val mse = 0.4806
Epoch 8: Train mse = 0.3642 | Val mse = 0.4055
Epoch 9: Train mse = 0.3225 | Val mse = 0.3773
Epoch 10: Train mse = 0.3009 | Val mse = 0.3503
Epoch 11: Train mse = 0.2881 | Val mse = 0.3576
Epoch 12: Train mse = 0.2728 | Val mse = 0.3294
Epoch 13: Train mse = 0.2702 | Val mse = 0.3282
Epoch 14: Train mse = 0.2672 | Val mse = 0.3190
Epoch 15: Train mse = 0.2581 | Val mse = 0.3256
Epoch 16: Train mse = 0.2538 | Val mse = 0.3569
Epoch 17: Train mse = 0.2493 | Val mse = 0.3440
Epoch 18: Train mse = 0.2412 | Val mse = 0.3414
Epoch 19: Train mse = 0.2361 | Val mse = 0.3409
Epoch 20: Train mse = 0.2303 | Val mse = 0.3432
Epoch 21: Train mse = 0.2279 | Val mse = 0.3335
Epoch 22: Train mse = 0.2188 | Val mse = 0.3507
Epoch 23: Train mse = 0.2175 | Val mse = 0.3614
Epoch 24: Train mse = 0.2126 | Val mse = 0.3556
Epoch 25: Train mse = 0.2101 | Val mse = 0.3473
Epoch 26: Train mse = 0.2069 | Val mse = 0.3534
Epoch 27: Train mse = 0.2037 | Val mse = 0.3591
Epoch 28: Train mse = 0.2023 | Val mse = 0.3616
Epoch 29: Train mse = 0.2032 | Val mse = 0.3522
Epoch 30: Train mse = 0.1963 | Val mse = 0.3619
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 1.0033 | Val mse = 1.2245
Epoch 2: Train mse = 1.0001 | Val mse = 1.2186
Epoch 3: Train mse = 0.9911 | Val mse = 1.1926
Epoch 4: Train mse = 0.9720 | Val mse = 1.1295
Epoch 5: Train mse = 0.9411 | Val mse = 1.0364
Epoch 6: Train mse = 0.9114 | Val mse = 0.9608
Epoch 7: Train mse = 0.9069 | Val mse = 0.9505
Epoch 8: Train mse = 0.8905 | Val mse = 0.9586
Epoch 9: Train mse = 0.8784 | Val mse = 0.9482
Epoch 10: Train mse = 0.8633 | Val mse = 0.9327
Epoch 11: Train mse = 0.8556 | Val mse = 0.9191
Epoch 12: Train mse = 0.8461 | Val mse = 0.9150
Epoch 13: Train mse = 0.8450 | Val mse = 0.9116
Epoch 14: Train mse = 0.8342 | Val mse = 0.9002
Epoch 15: Train mse = 0.8226 | Val mse = 0.8909
Epoch 16: Train mse = 0.7979 | Val mse = 0.8656
Epoch 17: Train mse = 0.7730 | Val mse = 0.8392
Epoch 18: Train mse = 0.7467 | Val mse = 0.8092
Epoch 19: Train mse = 0.7309 | Val mse = 0.7957
Epoch 20: Train mse = 0.7147 | Val mse = 0.7918
Epoch 21: Train mse = 0.6991 | Val mse = 0.7904
Epoch 22: Train mse = 0.6871 | Val mse = 0.7808
Epoch 23: Train mse = 0.6699 | Val mse = 0.7743
Epoch 24: Train mse = 0.6550 | Val mse = 0.7533
Epoch 25: Train mse = 0.6332 | Val mse = 0.7451
Epoch 26: Train mse = 0.6106 | Val mse = 0.7278
Epoch 27: Train mse = 0.5810 | Val mse = 0.7117
Epoch 28: Train mse = 0.5530 | Val mse = 0.6824
Epoch 29: Train mse = 0.5215 | Val mse = 0.6589
Epoch 30: Train mse = 0.4873 | Val mse = 0.6348
Epoch 31: Train mse = 0.4520 | Val mse = 0.6055
Epoch 32: Train mse = 0.4212 | Val mse = 0.5853
Epoch 33: Train mse = 0.3878 | Val mse = 0.5761
Epoch 34: Train mse = 0.3566 | Val mse = 0.5528
Epoch 35: Train mse = 0.3334 | Val mse = 0.5504
Epoch 36: Train mse = 0.3125 | Val mse = 0.5280
Epoch 37: Train mse = 0.2865 | Val mse = 0.5215
Epoch 38: Train mse = 0.2748 | Val mse = 0.5137
Epoch 39: Train mse = 0.2619 | Val mse = 0.5075
Epoch 40: Train mse = 0.2507 | Val mse = 0.5108
Epoch 41: Train mse = 0.2457 | Val mse = 0.5018
Epoch 42: Train mse = 0.2384 | Val mse = 0.4963
Epoch 43: Train mse = 0.2308 | Val mse = 0.4920
Epoch 44: Train mse = 0.2296 | Val mse = 0.5042
Epoch 45: Train mse = 0.2256 | Val mse = 0.4863
Epoch 46: Train mse = 0.2195 | Val mse = 0.4986
Epoch 47: Train mse = 0.2204 | Val mse = 0.4998
Epoch 48: Train mse = 0.2155 | Val mse = 0.4941
Epoch 49: Train mse = 0.2126 | Val mse = 0.5091
Epoch 50: Train mse = 0.2079 | Val mse = 0.5001
Using device: mps
Epoch 1: Train mse = 1.0033 | Val mse = 1.2244
Epoch 2: Train mse = 1.0003 | Val mse = 1.2193
Epoch 3: Train mse = 0.9920 | Val mse = 1.1956
Epoch 4: Train mse = 0.9736 | Val mse = 1.1342
Epoch 5: Train mse = 0.9423 | Val mse = 1.0371
Epoch 6: Train mse = 0.9113 | Val mse = 0.9600
Epoch 7: Train mse = 0.9063 | Val mse = 0.9501
Epoch 8: Train mse = 0.8904 | Val mse = 0.9592
Epoch 9: Train mse = 0.8790 | Val mse = 0.9504
Epoch 10: Train mse = 0.8647 | Val mse = 0.9349
Epoch 11: Train mse = 0.8569 | Val mse = 0.9219
Epoch 12: Train mse = 0.8478 | Val mse = 0.9165
Epoch 13: Train mse = 0.8476 | Val mse = 0.9141
Epoch 14: Train mse = 0.8388 | Val mse = 0.9038
Epoch 15: Train mse = 0.8301 | Val mse = 0.8970
Epoch 16: Train mse = 0.8081 | Val mse = 0.8751
Epoch 17: Train mse = 0.7848 | Val mse = 0.8487
Epoch 18: Train mse = 0.7569 | Val mse = 0.8147
Epoch 19: Train mse = 0.7379 | Val mse = 0.7973
Epoch 20: Train mse = 0.7223 | Val mse = 0.7912
Epoch 21: Train mse = 0.7071 | Val mse = 0.7887
Epoch 22: Train mse = 0.6956 | Val mse = 0.7815
Epoch 23: Train mse = 0.6797 | Val mse = 0.7770
Epoch 24: Train mse = 0.6668 | Val mse = 0.7570
Epoch 25: Train mse = 0.6470 | Val mse = 0.7460
Epoch 26: Train mse = 0.6262 | Val mse = 0.7329
Epoch 27: Train mse = 0.5985 | Val mse = 0.7141
Epoch 28: Train mse = 0.5729 | Val mse = 0.6868
Epoch 29: Train mse = 0.5432 | Val mse = 0.6658
Epoch 30: Train mse = 0.5100 | Val mse = 0.6379
Epoch 31: Train mse = 0.4768 | Val mse = 0.6051
Epoch 32: Train mse = 0.4457 | Val mse = 0.5848
Epoch 33: Train mse = 0.4121 | Val mse = 0.5727
Epoch 34: Train mse = 0.3792 | Val mse = 0.5470
Epoch 35: Train mse = 0.3537 | Val mse = 0.5294
Epoch 36: Train mse = 0.3312 | Val mse = 0.5122
Epoch 37: Train mse = 0.3027 | Val mse = 0.4972
Epoch 38: Train mse = 0.2904 | Val mse = 0.4954
Epoch 39: Train mse = 0.2761 | Val mse = 0.4866
Epoch 40: Train mse = 0.2632 | Val mse = 0.4830
Epoch 41: Train mse = 0.2574 | Val mse = 0.4780
Epoch 42: Train mse = 0.2498 | Val mse = 0.4685
Epoch 43: Train mse = 0.2410 | Val mse = 0.4668
Epoch 44: Train mse = 0.2402 | Val mse = 0.4709
Epoch 45: Train mse = 0.2348 | Val mse = 0.4562
Epoch 46: Train mse = 0.2279 | Val mse = 0.4719
Epoch 47: Train mse = 0.2297 | Val mse = 0.4691
Epoch 48: Train mse = 0.2233 | Val mse = 0.4762
Epoch 49: Train mse = 0.2210 | Val mse = 0.4880
Epoch 50: Train mse = 0.2165 | Val mse = 0.4765
Using device: mps
Epoch 1: Train mse = 0.9744 | Val mse = 0.9415
Epoch 2: Train mse = 0.8960 | Val mse = 0.9540
Epoch 3: Train mse = 0.8040 | Val mse = 0.8227
Epoch 4: Train mse = 0.7461 | Val mse = 0.8008
Epoch 5: Train mse = 0.6854 | Val mse = 0.7477
Epoch 6: Train mse = 0.5741 | Val mse = 0.5974
Epoch 7: Train mse = 0.4586 | Val mse = 0.5259
Epoch 8: Train mse = 0.3606 | Val mse = 0.4601
Epoch 9: Train mse = 0.3157 | Val mse = 0.4407
Epoch 10: Train mse = 0.2881 | Val mse = 0.3841
Epoch 11: Train mse = 0.2652 | Val mse = 0.3525
Epoch 12: Train mse = 0.2496 | Val mse = 0.3580
Epoch 13: Train mse = 0.2363 | Val mse = 0.3488
Epoch 14: Train mse = 0.2277 | Val mse = 0.3655
Epoch 15: Train mse = 0.2225 | Val mse = 0.3476
Epoch 16: Train mse = 0.2131 | Val mse = 0.3632
Epoch 17: Train mse = 0.2131 | Val mse = 0.3720
Epoch 18: Train mse = 0.2027 | Val mse = 0.3564
Epoch 19: Train mse = 0.2025 | Val mse = 0.3445
Epoch 20: Train mse = 0.1968 | Val mse = 0.3738
Epoch 21: Train mse = 0.1940 | Val mse = 0.3512
Epoch 22: Train mse = 0.1878 | Val mse = 0.3772
Epoch 23: Train mse = 0.1844 | Val mse = 0.3476
Epoch 24: Train mse = 0.1806 | Val mse = 0.3530
Epoch 25: Train mse = 0.1743 | Val mse = 0.3503
Epoch 26: Train mse = 0.1704 | Val mse = 0.3619
Epoch 27: Train mse = 0.1668 | Val mse = 0.3648
Epoch 28: Train mse = 0.1624 | Val mse = 0.3706
Epoch 29: Train mse = 0.1589 | Val mse = 0.3756
Epoch 30: Train mse = 0.1556 | Val mse = 0.3746
Early stopping triggered at epoch 30.
Using device: mps
Epoch 1: Train mse = 0.9748 | Val mse = 0.9430
Epoch 2: Train mse = 0.8970 | Val mse = 0.9633
Epoch 3: Train mse = 0.8126 | Val mse = 0.8296
Epoch 4: Train mse = 0.7504 | Val mse = 0.7954
Epoch 5: Train mse = 0.6923 | Val mse = 0.7529
Epoch 6: Train mse = 0.5845 | Val mse = 0.6024
Epoch 7: Train mse = 0.4690 | Val mse = 0.5276
Epoch 8: Train mse = 0.3682 | Val mse = 0.4392
Epoch 9: Train mse = 0.3117 | Val mse = 0.4288
Epoch 10: Train mse = 0.2876 | Val mse = 0.3671
Epoch 11: Train mse = 0.2697 | Val mse = 0.3527
Epoch 12: Train mse = 0.2562 | Val mse = 0.3475
Epoch 13: Train mse = 0.2455 | Val mse = 0.3311
Epoch 14: Train mse = 0.2300 | Val mse = 0.3483
Epoch 15: Train mse = 0.2246 | Val mse = 0.3490
Epoch 16: Train mse = 0.2161 | Val mse = 0.3360
Epoch 17: Train mse = 0.2100 | Val mse = 0.3403
Epoch 18: Train mse = 0.2032 | Val mse = 0.3571
Epoch 19: Train mse = 0.2025 | Val mse = 0.3507
Epoch 20: Train mse = 0.1935 | Val mse = 0.3507
Epoch 21: Train mse = 0.1871 | Val mse = 0.3474
Epoch 22: Train mse = 0.1806 | Val mse = 0.3596
Epoch 23: Train mse = 0.1781 | Val mse = 0.3523
Epoch 24: Train mse = 0.1761 | Val mse = 0.3617
Epoch 25: Train mse = 0.1709 | Val mse = 0.3591
Epoch 26: Train mse = 0.1674 | Val mse = 0.3667
Epoch 27: Train mse = 0.1642 | Val mse = 0.3626
Epoch 28: Train mse = 0.1603 | Val mse = 0.3714
Epoch 29: Train mse = 0.1582 | Val mse = 0.3668
Epoch 30: Train mse = 0.1557 | Val mse = 0.3704
Early stopping triggered at epoch 30.

Best parameters found in single holdout (grid):
  batch_size: 128
  hidden_dim: 32
  hidden_layers: 4
  knots: 10
  lr: 0.005
  spline_power: 7

Refitting with selected hyperparameters on (train_core + train_val), new early-stop split from that union…
Using device: mps
Epoch 1: Train mse = 0.9255 | Val mse = 0.9724
Epoch 2: Train mse = 0.7916 | Val mse = 0.8157
Epoch 3: Train mse = 0.6388 | Val mse = 0.6205
Epoch 4: Train mse = 0.4332 | Val mse = 0.4116
Epoch 5: Train mse = 0.3296 | Val mse = 0.3722
Epoch 6: Train mse = 0.3018 | Val mse = 0.3335
Epoch 7: Train mse = 0.2849 | Val mse = 0.3253
Epoch 8: Train mse = 0.2774 | Val mse = 0.3497
Epoch 9: Train mse = 0.2692 | Val mse = 0.3287
Epoch 10: Train mse = 0.2599 | Val mse = 0.3315
Epoch 11: Train mse = 0.2544 | Val mse = 0.3151
Epoch 12: Train mse = 0.2460 | Val mse = 0.3386
Epoch 13: Train mse = 0.2403 | Val mse = 0.3294
Epoch 14: Train mse = 0.2354 | Val mse = 0.3443
Epoch 15: Train mse = 0.2291 | Val mse = 0.3545
Epoch 16: Train mse = 0.2242 | Val mse = 0.3569
Epoch 17: Train mse = 0.2173 | Val mse = 0.3504
Epoch 18: Train mse = 0.2116 | Val mse = 0.3506
Epoch 19: Train mse = 0.2065 | Val mse = 0.3498
Epoch 20: Train mse = 0.2033 | Val mse = 0.3552
Epoch 21: Train mse = 0.2022 | Val mse = 0.3462
Epoch 22: Train mse = 0.1950 | Val mse = 0.3599
Epoch 23: Train mse = 0.1962 | Val mse = 0.3545
Epoch 24: Train mse = 0.1912 | Val mse = 0.3658
Epoch 25: Train mse = 0.1906 | Val mse = 0.3605
Epoch 26: Train mse = 0.1898 | Val mse = 0.3614
Epoch 27: Train mse = 0.1870 | Val mse = 0.3495
Epoch 28: Train mse = 0.1837 | Val mse = 0.3576
Epoch 29: Train mse = 0.1843 | Val mse = 0.3768
Epoch 30: Train mse = 0.1824 | Val mse = 0.3539
Early stopping triggered at epoch 30.

Results for the refit model (single holdout):

--- Task 1 ---
1 day(s) MAE                       : 0.09252019
1 day(s) RMSE                      : 0.17764964
1 day(s) R2                        : 0.30856554
1 day(s) Pearson r                 : 0.56776015
1 day(s) QLIKE                     : 0.36329599
3 day(s) MAE                       : 0.09563189
3 day(s) RMSE                      : 0.18181676
3 day(s) R2                        : 0.27015979
3 day(s) Pearson r                 : 0.54133039
3 day(s) QLIKE                     : 0.41800874
5 day(s) MAE                       : 0.09779209
5 day(s) RMSE                      : 0.18419081
5 day(s) R2                        : 0.24246419
5 day(s) Pearson r                 : 0.52675256
5 day(s) QLIKE                     : 0.44165327
10 day(s) MAE                      : 0.09992824
10 day(s) RMSE                     : 0.19002092
10 day(s) R2                       : 0.18605322
10 day(s) Pearson r                : 0.48376168
10 day(s) QLIKE                    : 0.45850896
20 day(s) MAE                      : 0.10213812
20 day(s) RMSE                     : 0.19326780
20 day(s) R2                       : 0.14806914
20 day(s) Pearson r                : 0.44689942
20 day(s) QLIKE                    : 0.49436390
full horizon MAE                   : 0.10213812
full horizon RMSE                  : 0.19326780
full horizon R2                    : 0.14806914
full horizon Pearson r             : 0.44689942
full horizon QLIKE                 : 0.49436390

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00319653, max=0.952788

Best single-holdout (refit) model saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/tune_saved_object/EURUSD/Simple_KAN_H20.pkl

=== AAPL | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.13326641523511
  Min value:  -2.354189421011421
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.906984328061974
  Min value:  -1.6295009124362931
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8950185841626346
  Min value:  -2.119931409321887
Epoch 1: Train mse = 1.0053 | Val mse = 0.5075
Epoch 2: Train mse = 0.9592 | Val mse = 0.5476
Epoch 3: Train mse = 0.9382 | Val mse = 0.5856
Epoch 4: Train mse = 0.9258 | Val mse = 0.5513
Epoch 5: Train mse = 0.9315 | Val mse = 0.5267
Epoch 6: Train mse = 0.9337 | Val mse = 0.5410
Epoch 7: Train mse = 0.9313 | Val mse = 0.5645
Epoch 8: Train mse = 0.9175 | Val mse = 0.5654
Epoch 9: Train mse = 0.9042 | Val mse = 0.5033
Epoch 10: Train mse = 0.8865 | Val mse = 0.6743
Epoch 11: Train mse = 0.8625 | Val mse = 0.4420
Epoch 12: Train mse = 0.8238 | Val mse = 0.3964
Epoch 13: Train mse = 0.7722 | Val mse = 0.4718
Epoch 14: Train mse = 0.7341 | Val mse = 0.3833
Epoch 15: Train mse = 0.6993 | Val mse = 0.5085
Epoch 16: Train mse = 0.7034 | Val mse = 0.5098
Epoch 17: Train mse = 0.6912 | Val mse = 0.4392
Epoch 18: Train mse = 0.7164 | Val mse = 0.5452
Epoch 19: Train mse = 0.6878 | Val mse = 0.3483
Epoch 20: Train mse = 0.6456 | Val mse = 0.4198
Epoch 21: Train mse = 0.6019 | Val mse = 0.3500
Epoch 22: Train mse = 0.6041 | Val mse = 0.3548
Epoch 23: Train mse = 0.5795 | Val mse = 0.3265
Epoch 24: Train mse = 0.5646 | Val mse = 0.3849
Epoch 25: Train mse = 0.5594 | Val mse = 0.3231
Epoch 26: Train mse = 0.5475 | Val mse = 0.3961
Epoch 27: Train mse = 0.5630 | Val mse = 0.3441
Epoch 28: Train mse = 0.5349 | Val mse = 0.3475
Epoch 29: Train mse = 0.5329 | Val mse = 0.3180
Epoch 30: Train mse = 0.5142 | Val mse = 0.3192
Epoch 31: Train mse = 0.5479 | Val mse = 0.3958
Epoch 32: Train mse = 0.5473 | Val mse = 0.3665
Epoch 33: Train mse = 0.5457 | Val mse = 0.3636
Epoch 34: Train mse = 0.5480 | Val mse = 0.3179
Epoch 35: Train mse = 0.5275 | Val mse = 0.3302
Epoch 36: Train mse = 0.5292 | Val mse = 0.3561
Epoch 37: Train mse = 0.5314 | Val mse = 0.3120
Epoch 38: Train mse = 0.5017 | Val mse = 0.3151
Epoch 39: Train mse = 0.5105 | Val mse = 0.3610
Epoch 40: Train mse = 0.5172 | Val mse = 0.3075
Epoch 41: Train mse = 0.5025 | Val mse = 0.3185
Epoch 42: Train mse = 0.4980 | Val mse = 0.3455
Epoch 43: Train mse = 0.5004 | Val mse = 0.3180
Epoch 44: Train mse = 0.4911 | Val mse = 0.3026
Epoch 45: Train mse = 0.4946 | Val mse = 0.3092
Epoch 46: Train mse = 0.4841 | Val mse = 0.3065
Epoch 47: Train mse = 0.4772 | Val mse = 0.3006
Epoch 48: Train mse = 0.4944 | Val mse = 0.2971
Epoch 49: Train mse = 0.4707 | Val mse = 0.3146
Epoch 50: Train mse = 0.4783 | Val mse = 0.2992

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.09291805
1 day(s) RMSE                      : 7.94515940
1 day(s) R2                        : 0.15488039
1 day(s) Pearson r                 : 0.42508621
1 day(s) QLIKE                     : 0.27848419
full horizon MAE                   : 2.09291805
full horizon RMSE                  : 7.94515940
full horizon R2                    : 0.15488039
full horizon Pearson r             : 0.42508621
full horizon QLIKE                 : 0.27848419

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/Simple_KAN_H1.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=0.910116, max=42.1542

=== AAPL | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.140231129276094
  Min value:  -2.358314580069225
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.907938252663739
  Min value:  -1.632926464574096
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.896926346992255
  Min value:  -2.123830418076347
Epoch 1: Train mse = 0.9891 | Val mse = 0.5359
Epoch 2: Train mse = 0.9560 | Val mse = 0.5720
Epoch 3: Train mse = 0.9503 | Val mse = 0.5444
Epoch 4: Train mse = 0.9461 | Val mse = 0.5649
Epoch 5: Train mse = 0.9304 | Val mse = 0.5143
Epoch 6: Train mse = 0.9098 | Val mse = 0.5279
Epoch 7: Train mse = 0.8673 | Val mse = 0.8307
Epoch 8: Train mse = 0.8977 | Val mse = 0.4213
Epoch 9: Train mse = 0.8272 | Val mse = 0.5242
Epoch 10: Train mse = 0.8127 | Val mse = 0.3998
Epoch 11: Train mse = 0.7924 | Val mse = 0.5530
Epoch 12: Train mse = 0.7598 | Val mse = 0.3725
Epoch 13: Train mse = 0.7171 | Val mse = 0.3614
Epoch 14: Train mse = 0.6899 | Val mse = 0.3632
Epoch 15: Train mse = 0.6846 | Val mse = 0.3885
Epoch 16: Train mse = 0.6722 | Val mse = 0.3592
Epoch 17: Train mse = 0.6530 | Val mse = 0.3651
Epoch 18: Train mse = 0.6485 | Val mse = 0.3718
Epoch 19: Train mse = 0.6275 | Val mse = 0.3714
Epoch 20: Train mse = 0.6338 | Val mse = 0.4259
Epoch 21: Train mse = 0.6296 | Val mse = 0.5326
Epoch 22: Train mse = 0.6995 | Val mse = 0.4398
Epoch 23: Train mse = 0.6728 | Val mse = 0.3708
Epoch 24: Train mse = 0.6525 | Val mse = 0.4712
Epoch 25: Train mse = 0.6376 | Val mse = 0.4230
Epoch 26: Train mse = 0.6805 | Val mse = 0.3777
Epoch 27: Train mse = 0.6278 | Val mse = 0.3803
Epoch 28: Train mse = 0.6282 | Val mse = 0.3783
Epoch 29: Train mse = 0.6348 | Val mse = 0.4645
Epoch 30: Train mse = 0.6227 | Val mse = 0.3748
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.25110548
1 day(s) RMSE                      : 8.34395350
1 day(s) R2                        : 0.06791246
1 day(s) Pearson r                 : 0.45744229
1 day(s) QLIKE                     : 0.33601700
3 day(s) MAE                       : 2.37829843
3 day(s) RMSE                      : 8.68364087
3 day(s) R2                        : 0.04583297
3 day(s) Pearson r                 : 0.41146993
3 day(s) QLIKE                     : 0.37382729
5 day(s) MAE                       : 2.42497110
5 day(s) RMSE                      : 8.81880666
5 day(s) R2                        : 0.02675646
5 day(s) Pearson r                 : 0.34551270
5 day(s) QLIKE                     : 0.40330327
full horizon MAE                   : 2.42497110
full horizon RMSE                  : 8.81880666
full horizon R2                    : 0.02675646
full horizon Pearson r             : 0.34551270
full horizon QLIKE                 : 0.40330327

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/Simple_KAN_H5.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.10249, max=10.0947

=== AAPL | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.14985814599714
  Min value:  -2.3634631558377808
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.909556794039435
  Min value:  -1.6371429190585964
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8998157347365474
  Min value:  -2.1286776824868885
Epoch 1: Train mse = 0.9898 | Val mse = 0.5641
Epoch 2: Train mse = 0.9701 | Val mse = 0.5563
Epoch 3: Train mse = 0.9656 | Val mse = 0.5651
Epoch 4: Train mse = 0.9581 | Val mse = 0.5675
Epoch 5: Train mse = 0.9407 | Val mse = 0.4860
Epoch 6: Train mse = 0.9193 | Val mse = 0.4606
Epoch 7: Train mse = 0.8759 | Val mse = 0.8520
Epoch 8: Train mse = 0.9160 | Val mse = 0.4071
Epoch 9: Train mse = 0.8396 | Val mse = 0.6457
Epoch 10: Train mse = 0.8397 | Val mse = 0.4058
Epoch 11: Train mse = 0.8139 | Val mse = 0.5295
Epoch 12: Train mse = 0.7736 | Val mse = 0.3798
Epoch 13: Train mse = 0.7621 | Val mse = 0.3747
Epoch 14: Train mse = 0.7423 | Val mse = 0.3802
Epoch 15: Train mse = 0.7299 | Val mse = 0.3755
Epoch 16: Train mse = 0.7030 | Val mse = 0.4419
Epoch 17: Train mse = 0.7069 | Val mse = 0.5444
Epoch 18: Train mse = 0.7402 | Val mse = 0.4959
Epoch 19: Train mse = 0.7195 | Val mse = 0.3984
Epoch 20: Train mse = 0.7311 | Val mse = 0.4568
Epoch 21: Train mse = 0.7095 | Val mse = 0.3802
Epoch 22: Train mse = 0.6900 | Val mse = 0.3766
Epoch 23: Train mse = 0.6784 | Val mse = 0.4264
Epoch 24: Train mse = 0.6762 | Val mse = 0.4274
Epoch 25: Train mse = 0.6693 | Val mse = 0.4022
Epoch 26: Train mse = 0.7168 | Val mse = 0.3735
Epoch 27: Train mse = 0.6855 | Val mse = 0.4669
Epoch 28: Train mse = 0.6843 | Val mse = 0.4031
Epoch 29: Train mse = 0.6993 | Val mse = 0.4212
Epoch 30: Train mse = 0.6723 | Val mse = 0.3904
Epoch 31: Train mse = 0.6590 | Val mse = 0.3731
Epoch 32: Train mse = 0.6582 | Val mse = 0.4205
Epoch 33: Train mse = 0.6651 | Val mse = 0.3853
Epoch 34: Train mse = 0.6573 | Val mse = 0.3919
Epoch 35: Train mse = 0.6774 | Val mse = 0.4104
Epoch 36: Train mse = 0.6671 | Val mse = 0.3988
Epoch 37: Train mse = 0.6543 | Val mse = 0.3899
Epoch 38: Train mse = 0.6672 | Val mse = 0.4333
Epoch 39: Train mse = 0.6700 | Val mse = 0.3790
Epoch 40: Train mse = 0.6513 | Val mse = 0.3726
Epoch 41: Train mse = 0.6575 | Val mse = 0.4257
Epoch 42: Train mse = 0.6578 | Val mse = 0.3751
Epoch 43: Train mse = 0.6402 | Val mse = 0.3728
Epoch 44: Train mse = 0.6399 | Val mse = 0.3736
Epoch 45: Train mse = 0.6406 | Val mse = 0.3955
Epoch 46: Train mse = 0.6427 | Val mse = 0.3813
Epoch 47: Train mse = 0.6361 | Val mse = 0.3743
Epoch 48: Train mse = 0.6406 | Val mse = 0.4132
Epoch 49: Train mse = 0.6409 | Val mse = 0.3798
Epoch 50: Train mse = 0.6331 | Val mse = 0.3734
Early stopping triggered at epoch 50.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.20014539
1 day(s) RMSE                      : 8.22099640
1 day(s) R2                        : 0.09518068
1 day(s) Pearson r                 : 0.48762716
1 day(s) QLIKE                     : 0.30315272
3 day(s) MAE                       : 2.35183440
3 day(s) RMSE                      : 8.54207272
3 day(s) R2                        : 0.07669066
3 day(s) Pearson r                 : 0.44946218
3 day(s) QLIKE                     : 0.33890736
5 day(s) MAE                       : 2.41954112
5 day(s) RMSE                      : 8.70379274
5 day(s) R2                        : 0.05197679
5 day(s) Pearson r                 : 0.37753169
5 day(s) QLIKE                     : 0.37228861
10 day(s) MAE                      : 2.50346173
10 day(s) RMSE                     : 8.91896613
10 day(s) R2                       : 0.01271223
10 day(s) Pearson r                : 0.25150718
10 day(s) QLIKE                    : 0.42610093
full horizon MAE                   : 2.50346173
full horizon RMSE                  : 8.91896613
full horizon R2                    : 0.01271223
full horizon Pearson r             : 0.25150718
full horizon QLIKE                 : 0.42610093

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/Simple_KAN_H10.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=0.934215, max=13.0407

=== AAPL | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.17404540383974
  Min value:  -2.37751047680313
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9130206404899854
  Min value:  -1.6487782037743786
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.906568131251477
  Min value:  -2.14194530457574
Epoch 1: Train mse = 0.9910 | Val mse = 0.5617
Epoch 2: Train mse = 0.9783 | Val mse = 0.5729
Epoch 3: Train mse = 0.9744 | Val mse = 0.5637
Epoch 4: Train mse = 0.9718 | Val mse = 0.5857
Epoch 5: Train mse = 0.9566 | Val mse = 0.5004
Epoch 6: Train mse = 0.9451 | Val mse = 0.5708
Epoch 7: Train mse = 0.9194 | Val mse = 0.7146
Epoch 8: Train mse = 0.9179 | Val mse = 0.4578
Epoch 9: Train mse = 0.8673 | Val mse = 0.4113
Epoch 10: Train mse = 0.8416 | Val mse = 0.6305
Epoch 11: Train mse = 0.8394 | Val mse = 0.3961
Epoch 12: Train mse = 0.8093 | Val mse = 0.3752
Epoch 13: Train mse = 0.8231 | Val mse = 0.4683
Epoch 14: Train mse = 0.7944 | Val mse = 0.4825
Epoch 15: Train mse = 0.7757 | Val mse = 0.3699
Epoch 16: Train mse = 0.7705 | Val mse = 0.3751
Epoch 17: Train mse = 0.7754 | Val mse = 0.3782
Epoch 18: Train mse = 0.7501 | Val mse = 0.4101
Epoch 19: Train mse = 0.7404 | Val mse = 0.4434
Epoch 20: Train mse = 0.7555 | Val mse = 0.5498
Epoch 21: Train mse = 0.7871 | Val mse = 0.4997
Epoch 22: Train mse = 0.7771 | Val mse = 0.3684
Epoch 23: Train mse = 0.7644 | Val mse = 0.4714
Epoch 24: Train mse = 0.7532 | Val mse = 0.3880
Epoch 25: Train mse = 0.7521 | Val mse = 0.3824
Epoch 26: Train mse = 0.7720 | Val mse = 0.4754
Epoch 27: Train mse = 0.7496 | Val mse = 0.3846
Epoch 28: Train mse = 0.7619 | Val mse = 0.3691
Epoch 29: Train mse = 0.7627 | Val mse = 0.5362
Epoch 30: Train mse = 0.7520 | Val mse = 0.3719
Epoch 31: Train mse = 0.7504 | Val mse = 0.4614
Epoch 32: Train mse = 0.7414 | Val mse = 0.3854
Early stopping triggered at epoch 32.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.30159873
1 day(s) RMSE                      : 8.52796474
1 day(s) R2                        : 0.02634804
1 day(s) Pearson r                 : 0.43032947
1 day(s) QLIKE                     : 0.34556555
3 day(s) MAE                       : 2.41229240
3 day(s) RMSE                      : 8.82574264
3 day(s) R2                        : 0.01434889
3 day(s) Pearson r                 : 0.39985512
3 day(s) QLIKE                     : 0.37992373
5 day(s) MAE                       : 2.44877101
5 day(s) RMSE                      : 8.93104856
5 day(s) R2                        : 0.00182477
5 day(s) Pearson r                 : 0.33653280
5 day(s) QLIKE                     : 0.40626129
10 day(s) MAE                      : 2.48935098
10 day(s) RMSE                     : 9.04631660
10 day(s) R2                       : -0.01568326
10 day(s) Pearson r                : 0.23878478
10 day(s) QLIKE                    : 0.44873021
20 day(s) MAE                      : 2.51857575
20 day(s) RMSE                     : 9.10493087
20 day(s) R2                       : -0.02451126
20 day(s) Pearson r                : 0.18519968
20 day(s) QLIKE                    : 0.47639951
full horizon MAE                   : 2.51857575
full horizon RMSE                  : 9.10493087
full horizon R2                    : -0.02451126
full horizon Pearson r             : 0.18519968
full horizon QLIKE                 : 0.47639951

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/Simple_KAN_H20.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.04742, max=8.53787

=== MSFT | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2181198470171735
  Min value:  -2.80514498305857
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.894759677334685
  Min value:  -1.3150487901568384
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930619914036178
  Min value:  -2.329935051243564
Epoch 1: Train mse = 0.9254 | Val mse = 0.4698
Epoch 2: Train mse = 0.7538 | Val mse = 0.4615
Epoch 3: Train mse = 0.6147 | Val mse = 0.5705
Epoch 4: Train mse = 0.5394 | Val mse = 0.5249
Epoch 5: Train mse = 0.4789 | Val mse = 0.5537
Epoch 6: Train mse = 0.4585 | Val mse = 0.4981
Epoch 7: Train mse = 0.4176 | Val mse = 0.4731
Epoch 8: Train mse = 0.3971 | Val mse = 0.5471
Epoch 9: Train mse = 0.3738 | Val mse = 0.5202
Epoch 10: Train mse = 0.3604 | Val mse = 0.5683
Epoch 11: Train mse = 0.3516 | Val mse = 0.5048
Epoch 12: Train mse = 0.3277 | Val mse = 0.5299
Epoch 13: Train mse = 0.3119 | Val mse = 0.4805
Epoch 14: Train mse = 0.3033 | Val mse = 0.5719
Epoch 15: Train mse = 0.2895 | Val mse = 0.5775
Epoch 16: Train mse = 0.2641 | Val mse = 0.5606
Epoch 17: Train mse = 0.2481 | Val mse = 0.6413
Epoch 18: Train mse = 0.2235 | Val mse = 0.6416
Epoch 19: Train mse = 0.2074 | Val mse = 0.6176
Epoch 20: Train mse = 0.2026 | Val mse = 0.6393
Epoch 21: Train mse = 0.1849 | Val mse = 0.7020
Epoch 22: Train mse = 0.1649 | Val mse = 0.6636
Epoch 23: Train mse = 0.1580 | Val mse = 0.7256
Epoch 24: Train mse = 0.1394 | Val mse = 0.6844
Epoch 25: Train mse = 0.1263 | Val mse = 0.6885
Epoch 26: Train mse = 0.1222 | Val mse = 0.7086
Epoch 27: Train mse = 0.1096 | Val mse = 0.7359
Epoch 28: Train mse = 0.1021 | Val mse = 0.7173
Epoch 29: Train mse = 0.0972 | Val mse = 0.7057
Epoch 30: Train mse = 0.0848 | Val mse = 0.7699
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.05990044
1 day(s) RMSE                      : 6.69425260
1 day(s) R2                        : 0.00254031
1 day(s) Pearson r                 : 0.13176566
1 day(s) QLIKE                     : 0.48850694
full horizon MAE                   : 2.05990044
full horizon RMSE                  : 6.69425260
full horizon R2                    : 0.00254031
full horizon Pearson r             : 0.13176566
full horizon QLIKE                 : 0.48850694

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/Simple_KAN_H1.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.29444, max=6.91675

=== MSFT | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.21838735918136
  Min value:  -2.8084790413953242
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8943485628110612
  Min value:  -1.3176187186164257
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9307399945851973
  Min value:  -2.33302541920076
Epoch 1: Train mse = 0.9563 | Val mse = 0.4579
Epoch 2: Train mse = 0.7624 | Val mse = 0.5573
Epoch 3: Train mse = 0.6701 | Val mse = 0.5553
Epoch 4: Train mse = 0.6024 | Val mse = 0.5101
Epoch 5: Train mse = 0.5674 | Val mse = 0.5017
Epoch 6: Train mse = 0.5519 | Val mse = 0.5015
Epoch 7: Train mse = 0.5330 | Val mse = 0.5092
Epoch 8: Train mse = 0.5163 | Val mse = 0.5042
Epoch 9: Train mse = 0.5010 | Val mse = 0.5265
Epoch 10: Train mse = 0.4867 | Val mse = 0.5352
Epoch 11: Train mse = 0.4768 | Val mse = 0.5345
Epoch 12: Train mse = 0.4623 | Val mse = 0.5143
Epoch 13: Train mse = 0.4517 | Val mse = 0.5425
Epoch 14: Train mse = 0.4337 | Val mse = 0.5918
Epoch 15: Train mse = 0.4233 | Val mse = 0.5521
Epoch 16: Train mse = 0.4086 | Val mse = 0.5846
Epoch 17: Train mse = 0.3898 | Val mse = 0.6368
Epoch 18: Train mse = 0.3727 | Val mse = 0.6296
Epoch 19: Train mse = 0.3474 | Val mse = 0.6888
Epoch 20: Train mse = 0.3335 | Val mse = 0.6975
Epoch 21: Train mse = 0.3151 | Val mse = 0.6753
Epoch 22: Train mse = 0.3042 | Val mse = 0.7631
Epoch 23: Train mse = 0.2956 | Val mse = 0.7191
Epoch 24: Train mse = 0.2853 | Val mse = 0.7395
Epoch 25: Train mse = 0.2596 | Val mse = 0.6841
Epoch 26: Train mse = 0.2447 | Val mse = 0.7641
Epoch 27: Train mse = 0.2353 | Val mse = 0.7305
Epoch 28: Train mse = 0.2196 | Val mse = 0.7143
Epoch 29: Train mse = 0.2047 | Val mse = 0.7753
Epoch 30: Train mse = 0.1907 | Val mse = 0.7252
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.08697930
1 day(s) RMSE                      : 6.74999687
1 day(s) R2                        : -0.01414092
1 day(s) Pearson r                 : 0.04331799
1 day(s) QLIKE                     : 0.47820556
3 day(s) MAE                       : 2.09707068
3 day(s) RMSE                      : 6.77808809
3 day(s) R2                        : -0.02266788
3 day(s) Pearson r                 : 0.01284365
3 day(s) QLIKE                     : 0.49117872
5 day(s) MAE                       : 2.10297842
5 day(s) RMSE                      : 6.78954492
5 day(s) R2                        : -0.02601286
5 day(s) Pearson r                 : -0.00831240
5 day(s) QLIKE                     : 0.49696362
full horizon MAE                   : 2.10297842
full horizon RMSE                  : 6.78954492
full horizon R2                    : -0.02601286
full horizon Pearson r             : -0.00831240
full horizon QLIKE                 : 0.49696362

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/Simple_KAN_H5.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.72465, max=6.17752

=== MSFT | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2208777591762505
  Min value:  -2.8143642605456183
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8952607849867555
  Min value:  -1.3217269183173053
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9328875355250505
  Min value:  -2.3383439250775
Epoch 1: Train mse = 0.9629 | Val mse = 0.4491
Epoch 2: Train mse = 0.8065 | Val mse = 0.5414
Epoch 3: Train mse = 0.7313 | Val mse = 0.5498
Epoch 4: Train mse = 0.6766 | Val mse = 0.5387
Epoch 5: Train mse = 0.6424 | Val mse = 0.4807
Epoch 6: Train mse = 0.6213 | Val mse = 0.4916
Epoch 7: Train mse = 0.6001 | Val mse = 0.5406
Epoch 8: Train mse = 0.5889 | Val mse = 0.5193
Epoch 9: Train mse = 0.5702 | Val mse = 0.5264
Epoch 10: Train mse = 0.5605 | Val mse = 0.5407
Epoch 11: Train mse = 0.5428 | Val mse = 0.5692
Epoch 12: Train mse = 0.5335 | Val mse = 0.5546
Epoch 13: Train mse = 0.5160 | Val mse = 0.5746
Epoch 14: Train mse = 0.5024 | Val mse = 0.5861
Epoch 15: Train mse = 0.4849 | Val mse = 0.6377
Epoch 16: Train mse = 0.4669 | Val mse = 0.6235
Epoch 17: Train mse = 0.4462 | Val mse = 0.6726
Epoch 18: Train mse = 0.4249 | Val mse = 0.6777
Epoch 19: Train mse = 0.4209 | Val mse = 0.6784
Epoch 20: Train mse = 0.4117 | Val mse = 0.6468
Epoch 21: Train mse = 0.3875 | Val mse = 0.6715
Epoch 22: Train mse = 0.3778 | Val mse = 0.6640
Epoch 23: Train mse = 0.3612 | Val mse = 0.6825
Epoch 24: Train mse = 0.3444 | Val mse = 0.6414
Epoch 25: Train mse = 0.3340 | Val mse = 0.7065
Epoch 26: Train mse = 0.3285 | Val mse = 0.6746
Epoch 27: Train mse = 0.3130 | Val mse = 0.6802
Epoch 28: Train mse = 0.3007 | Val mse = 0.6831
Epoch 29: Train mse = 0.2919 | Val mse = 0.6829
Epoch 30: Train mse = 0.2864 | Val mse = 0.6772
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.09371599
1 day(s) RMSE                      : 6.74870822
1 day(s) R2                        : -0.01375373
1 day(s) Pearson r                 : 0.03685442
1 day(s) QLIKE                     : 0.47787292
3 day(s) MAE                       : 2.10093592
3 day(s) RMSE                      : 6.78171489
3 day(s) R2                        : -0.02376258
3 day(s) Pearson r                 : 0.00789979
3 day(s) QLIKE                     : 0.49269768
5 day(s) MAE                       : 2.10425180
5 day(s) RMSE                      : 6.79225105
5 day(s) R2                        : -0.02683090
5 day(s) Pearson r                 : -0.01282886
5 day(s) QLIKE                     : 0.49814073
10 day(s) MAE                      : 2.12759607
10 day(s) RMSE                     : 6.80651790
10 day(s) R2                       : -0.03107345
10 day(s) Pearson r                : -0.03254652
10 day(s) QLIKE                    : 0.50281527
full horizon MAE                   : 2.12759607
full horizon RMSE                  : 6.80651790
full horizon R2                    : -0.03107345
full horizon Pearson r             : -0.03254652
full horizon QLIKE                 : 0.50281527

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/Simple_KAN_H10.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.67026, max=6.31979

=== MSFT | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2181890995511795
  Min value:  -2.821740645483995
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.891688839722294
  Min value:  -1.3281087285369817
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930006982137461
  Min value:  -2.3454031279530607
Epoch 1: Train mse = 0.9729 | Val mse = 0.4179
Epoch 2: Train mse = 0.8606 | Val mse = 0.5052
Epoch 3: Train mse = 0.7965 | Val mse = 0.4968
Epoch 4: Train mse = 0.7578 | Val mse = 0.5107
Epoch 5: Train mse = 0.7229 | Val mse = 0.4864
Epoch 6: Train mse = 0.6967 | Val mse = 0.4898
Epoch 7: Train mse = 0.6810 | Val mse = 0.5122
Epoch 8: Train mse = 0.6715 | Val mse = 0.5057
Epoch 9: Train mse = 0.6540 | Val mse = 0.5154
Epoch 10: Train mse = 0.6401 | Val mse = 0.5123
Epoch 11: Train mse = 0.6226 | Val mse = 0.5372
Epoch 12: Train mse = 0.6152 | Val mse = 0.5199
Epoch 13: Train mse = 0.5941 | Val mse = 0.5393
Epoch 14: Train mse = 0.5846 | Val mse = 0.5380
Epoch 15: Train mse = 0.5673 | Val mse = 0.5911
Epoch 16: Train mse = 0.5468 | Val mse = 0.5313
Epoch 17: Train mse = 0.5385 | Val mse = 0.5858
Epoch 18: Train mse = 0.5247 | Val mse = 0.5715
Epoch 19: Train mse = 0.5109 | Val mse = 0.7150
Epoch 20: Train mse = 0.4996 | Val mse = 0.5361
Epoch 21: Train mse = 0.4855 | Val mse = 0.6277
Epoch 22: Train mse = 0.4613 | Val mse = 0.5562
Epoch 23: Train mse = 0.4379 | Val mse = 0.5822
Epoch 24: Train mse = 0.4223 | Val mse = 0.6124
Epoch 25: Train mse = 0.4083 | Val mse = 0.5499
Epoch 26: Train mse = 0.3985 | Val mse = 0.5674
Epoch 27: Train mse = 0.3869 | Val mse = 0.5961
Epoch 28: Train mse = 0.3742 | Val mse = 0.5470
Epoch 29: Train mse = 0.3646 | Val mse = 0.5922
Epoch 30: Train mse = 0.3529 | Val mse = 0.5725
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.08556181
1 day(s) RMSE                      : 6.74987247
1 day(s) R2                        : -0.01410353
1 day(s) Pearson r                 : 0.03039663
1 day(s) QLIKE                     : 0.47572714
3 day(s) MAE                       : 2.09948756
3 day(s) RMSE                      : 6.78082289
3 day(s) R2                        : -0.02349328
3 day(s) Pearson r                 : 0.00300031
3 day(s) QLIKE                     : 0.49200768
5 day(s) MAE                       : 2.10071868
5 day(s) RMSE                      : 6.78909584
5 day(s) R2                        : -0.02587714
5 day(s) Pearson r                 : -0.01649653
5 day(s) QLIKE                     : 0.49618999
10 day(s) MAE                      : 2.12444029
10 day(s) RMSE                     : 6.80147687
10 day(s) R2                       : -0.02954676
10 day(s) Pearson r                : -0.03414388
10 day(s) QLIKE                    : 0.50029684
20 day(s) MAE                      : 2.17795226
20 day(s) RMSE                     : 6.99889758
20 day(s) R2                       : -0.02726827
20 day(s) Pearson r                : -0.02294529
20 day(s) QLIKE                    : 0.50306697
full horizon MAE                   : 2.17795226
full horizon RMSE                  : 6.99889758
full horizon R2                    : -0.02726827
full horizon Pearson r             : -0.02294529
full horizon QLIKE                 : 0.50306697

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/Simple_KAN_H20.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.70931, max=5.88749

=== GE | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.030029259880504
  Min value:  -2.652656432671519
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0718644033108644
  Min value:  -2.366307284351862
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.646703896552383
  Min value:  -2.148420825574933
Epoch 1: Train mse = 0.9938 | Val mse = 0.7719
Epoch 2: Train mse = 0.9907 | Val mse = 0.8884
Epoch 3: Train mse = 0.9874 | Val mse = 0.8601
Epoch 4: Train mse = 0.9702 | Val mse = 0.8258
Epoch 5: Train mse = 0.9800 | Val mse = 0.8268
Epoch 6: Train mse = 0.9722 | Val mse = 0.8654
Epoch 7: Train mse = 0.9538 | Val mse = 0.8206
Epoch 8: Train mse = 0.8813 | Val mse = 1.2264
Epoch 9: Train mse = 0.8915 | Val mse = 0.8191
Epoch 10: Train mse = 0.7528 | Val mse = 0.4925
Epoch 11: Train mse = 0.7462 | Val mse = 0.8326
Epoch 12: Train mse = 0.7046 | Val mse = 0.5297
Epoch 13: Train mse = 0.6734 | Val mse = 0.9093
Epoch 14: Train mse = 0.6730 | Val mse = 0.8770
Epoch 15: Train mse = 0.6560 | Val mse = 0.6344
Epoch 16: Train mse = 0.6435 | Val mse = 0.9138
Epoch 17: Train mse = 0.6360 | Val mse = 0.6632
Epoch 18: Train mse = 0.6410 | Val mse = 0.4678
Epoch 19: Train mse = 0.6611 | Val mse = 0.8444
Epoch 20: Train mse = 0.6299 | Val mse = 0.5433
Epoch 21: Train mse = 0.6144 | Val mse = 0.5621
Epoch 22: Train mse = 0.5870 | Val mse = 0.7546
Epoch 23: Train mse = 0.5703 | Val mse = 0.5304
Epoch 24: Train mse = 0.5810 | Val mse = 0.5670
Epoch 25: Train mse = 0.5645 | Val mse = 0.5737
Epoch 26: Train mse = 0.5691 | Val mse = 0.5467
Epoch 27: Train mse = 0.5426 | Val mse = 0.6746
Epoch 28: Train mse = 0.5370 | Val mse = 0.5707
Epoch 29: Train mse = 0.5656 | Val mse = 0.5968
Epoch 30: Train mse = 0.6146 | Val mse = 0.5025
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.16510060
1 day(s) RMSE                      : 27.62973955
1 day(s) R2                        : -0.01489890
1 day(s) Pearson r                 : 0.02135212
1 day(s) QLIKE                     : 0.70782687
full horizon MAE                   : 4.16510060
full horizon RMSE                  : 27.62973955
full horizon R2                    : -0.01489890
full horizon Pearson r             : 0.02135212
full horizon QLIKE                 : 0.70782687

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/Simple_KAN_H1.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.74756, max=12.9921

=== GE | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.05628901565786
  Min value:  -2.663641411446664
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0759413314511908
  Min value:  -2.3763793738908188
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.658989486293715
  Min value:  -2.1577982868808543
Epoch 1: Train mse = 0.9991 | Val mse = 0.9131
Epoch 2: Train mse = 0.9932 | Val mse = 0.9112
Epoch 3: Train mse = 0.9935 | Val mse = 0.8607
Epoch 4: Train mse = 0.9855 | Val mse = 0.8800
Epoch 5: Train mse = 0.9839 | Val mse = 0.8868
Epoch 6: Train mse = 0.9778 | Val mse = 0.8464
Epoch 7: Train mse = 0.9520 | Val mse = 0.9441
Epoch 8: Train mse = 0.8467 | Val mse = 0.9627
Epoch 9: Train mse = 0.8783 | Val mse = 1.6082
Epoch 10: Train mse = 0.8841 | Val mse = 0.5860
Epoch 11: Train mse = 0.8600 | Val mse = 0.5924
Epoch 12: Train mse = 0.8855 | Val mse = 0.7008
Epoch 13: Train mse = 0.8417 | Val mse = 0.8750
Epoch 14: Train mse = 0.7904 | Val mse = 0.7858
Epoch 15: Train mse = 0.7210 | Val mse = 0.6099
Epoch 16: Train mse = 0.7098 | Val mse = 0.6118
Epoch 17: Train mse = 0.6581 | Val mse = 0.6129
Epoch 18: Train mse = 0.6419 | Val mse = 0.6728
Epoch 19: Train mse = 0.6272 | Val mse = 0.7941
Epoch 20: Train mse = 0.7148 | Val mse = 0.7506
Epoch 21: Train mse = 0.6846 | Val mse = 1.0326
Epoch 22: Train mse = 0.7003 | Val mse = 0.4515
Epoch 23: Train mse = 0.6698 | Val mse = 0.7895
Epoch 24: Train mse = 0.6367 | Val mse = 0.5284
Epoch 25: Train mse = 0.6275 | Val mse = 0.5942
Epoch 26: Train mse = 0.6096 | Val mse = 0.7522
Epoch 27: Train mse = 0.6231 | Val mse = 0.6312
Epoch 28: Train mse = 0.5960 | Val mse = 0.5194
Epoch 29: Train mse = 0.6094 | Val mse = 0.6909
Epoch 30: Train mse = 0.5935 | Val mse = 0.6980
Epoch 31: Train mse = 0.5913 | Val mse = 0.5902
Epoch 32: Train mse = 0.5780 | Val mse = 0.5866
Early stopping triggered at epoch 32.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.05822975
1 day(s) RMSE                      : 27.58341600
1 day(s) R2                        : -0.01149862
1 day(s) Pearson r                 : 0.02931681
1 day(s) QLIKE                     : 0.69370861
3 day(s) MAE                       : 4.12038763
3 day(s) RMSE                      : 27.59488814
3 day(s) R2                        : -0.01239844
3 day(s) Pearson r                 : 0.01810067
3 day(s) QLIKE                     : 0.70444985
5 day(s) MAE                       : 4.16206894
5 day(s) RMSE                      : 27.60343495
5 day(s) R2                        : -0.01308368
5 day(s) Pearson r                 : 0.00933897
5 day(s) QLIKE                     : 0.71453781
full horizon MAE                   : 4.16206894
full horizon RMSE                  : 27.60343495
full horizon R2                    : -0.01308368
full horizon Pearson r             : 0.00933897
full horizon QLIKE                 : 0.71453781

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/Simple_KAN_H5.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.95213, max=11.6411

=== GE | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.080595466809852
  Min value:  -2.674432668646892
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.079343715915996
  Min value:  -2.3863103666656302
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.670127332275317
  Min value:  -2.167074694264489
Epoch 1: Train mse = 0.9973 | Val mse = 0.9485
Epoch 2: Train mse = 0.9903 | Val mse = 0.9421
Epoch 3: Train mse = 0.9872 | Val mse = 0.8897
Epoch 4: Train mse = 0.9816 | Val mse = 0.9019
Epoch 5: Train mse = 0.9753 | Val mse = 0.9017
Epoch 6: Train mse = 0.9513 | Val mse = 0.8946
Epoch 7: Train mse = 0.8740 | Val mse = 0.8438
Epoch 8: Train mse = 0.8453 | Val mse = 0.5438
Epoch 9: Train mse = 0.7968 | Val mse = 0.7913
Epoch 10: Train mse = 0.7902 | Val mse = 0.6465
Epoch 11: Train mse = 0.7752 | Val mse = 0.8781
Epoch 12: Train mse = 0.7376 | Val mse = 0.7444
Epoch 13: Train mse = 0.7169 | Val mse = 0.6393
Epoch 14: Train mse = 0.6872 | Val mse = 0.5695
Epoch 15: Train mse = 0.7079 | Val mse = 0.6222
Epoch 16: Train mse = 0.6718 | Val mse = 0.9295
Epoch 17: Train mse = 0.6733 | Val mse = 0.9263
Epoch 18: Train mse = 0.6870 | Val mse = 0.6816
Epoch 19: Train mse = 0.6533 | Val mse = 0.5498
Epoch 20: Train mse = 0.6374 | Val mse = 0.6488
Epoch 21: Train mse = 0.6399 | Val mse = 0.7327
Epoch 22: Train mse = 0.6536 | Val mse = 0.8391
Epoch 23: Train mse = 0.6309 | Val mse = 0.6833
Epoch 24: Train mse = 0.6133 | Val mse = 0.5995
Epoch 25: Train mse = 0.6098 | Val mse = 0.5958
Epoch 26: Train mse = 0.6611 | Val mse = 0.5477
Epoch 27: Train mse = 0.7278 | Val mse = 0.9080
Epoch 28: Train mse = 0.6887 | Val mse = 0.5293
Epoch 29: Train mse = 0.6600 | Val mse = 0.7193
Epoch 30: Train mse = 0.6405 | Val mse = 0.7425
Epoch 31: Train mse = 0.6301 | Val mse = 0.5127
Epoch 32: Train mse = 0.6296 | Val mse = 0.6902
Epoch 33: Train mse = 0.6112 | Val mse = 0.7864
Epoch 34: Train mse = 0.6073 | Val mse = 0.6058
Epoch 35: Train mse = 0.5996 | Val mse = 0.5828
Epoch 36: Train mse = 0.6023 | Val mse = 0.5594
Epoch 37: Train mse = 0.5960 | Val mse = 0.5534
Epoch 38: Train mse = 0.5994 | Val mse = 0.5489
Epoch 39: Train mse = 0.6066 | Val mse = 0.5423
Epoch 40: Train mse = 0.6001 | Val mse = 0.5931
Epoch 41: Train mse = 0.6030 | Val mse = 0.6706
Early stopping triggered at epoch 41.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.15799679
1 day(s) RMSE                      : 27.59039340
1 day(s) R2                        : -0.01201042
1 day(s) Pearson r                 : 0.03725842
1 day(s) QLIKE                     : 0.70512701
3 day(s) MAE                       : 4.24159162
3 day(s) RMSE                      : 27.60785856
3 day(s) R2                        : -0.01335037
3 day(s) Pearson r                 : 0.02322336
3 day(s) QLIKE                     : 0.71928524
5 day(s) MAE                       : 4.29887239
5 day(s) RMSE                      : 27.62561625
5 day(s) R2                        : -0.01471250
5 day(s) Pearson r                 : 0.01391111
5 day(s) QLIKE                     : 0.73467487
10 day(s) MAE                      : 4.39848848
10 day(s) RMSE                     : 27.64341170
10 day(s) R2                       : -0.01587111
10 day(s) Pearson r                : 0.00412119
10 day(s) QLIKE                    : 0.75186128
full horizon MAE                   : 4.39848848
full horizon RMSE                  : 27.64341170
full horizon R2                    : -0.01587111
full horizon Pearson r             : 0.00412119
full horizon QLIKE                 : 0.75186128

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/Simple_KAN_H10.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.42643, max=17.0831

=== GE | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.120224031597557
  Min value:  -2.692943196140936
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.084344850046585
  Min value:  -2.4033958725510236
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.6879422007515705
  Min value:  -2.183075884328406
Epoch 1: Train mse = 0.9950 | Val mse = 1.0139
Epoch 2: Train mse = 0.9952 | Val mse = 0.9464
Epoch 3: Train mse = 0.9918 | Val mse = 0.9112
Epoch 4: Train mse = 0.9861 | Val mse = 0.9667
Epoch 5: Train mse = 0.9735 | Val mse = 0.8121
Epoch 6: Train mse = 0.9345 | Val mse = 0.9924
Epoch 7: Train mse = 0.8471 | Val mse = 0.9095
Epoch 8: Train mse = 0.8702 | Val mse = 1.3946
Epoch 9: Train mse = 0.8522 | Val mse = 0.5739
Epoch 10: Train mse = 0.8601 | Val mse = 0.7986
Epoch 11: Train mse = 0.8437 | Val mse = 1.0733
Epoch 12: Train mse = 0.8185 | Val mse = 0.6806
Epoch 13: Train mse = 0.7771 | Val mse = 0.9016
Epoch 14: Train mse = 0.7637 | Val mse = 0.5687
Epoch 15: Train mse = 0.7594 | Val mse = 0.9464
Epoch 16: Train mse = 0.7462 | Val mse = 0.7413
Epoch 17: Train mse = 0.7382 | Val mse = 0.5942
Epoch 18: Train mse = 0.7269 | Val mse = 1.0064
Epoch 19: Train mse = 0.7261 | Val mse = 0.9687
Epoch 20: Train mse = 0.7208 | Val mse = 0.6575
Epoch 21: Train mse = 0.6918 | Val mse = 0.6793
Epoch 22: Train mse = 0.6810 | Val mse = 0.8098
Epoch 23: Train mse = 0.6721 | Val mse = 0.6808
Epoch 24: Train mse = 0.6711 | Val mse = 0.5892
Epoch 25: Train mse = 0.7236 | Val mse = 0.5938
Epoch 26: Train mse = 0.8284 | Val mse = 1.1122
Epoch 27: Train mse = 0.7701 | Val mse = 0.5025
Epoch 28: Train mse = 0.7350 | Val mse = 0.8262
Epoch 29: Train mse = 0.7082 | Val mse = 0.5815
Epoch 30: Train mse = 0.6952 | Val mse = 0.7964
Epoch 31: Train mse = 0.6723 | Val mse = 0.6490
Epoch 32: Train mse = 0.6598 | Val mse = 0.6421
Epoch 33: Train mse = 0.6540 | Val mse = 0.6857
Epoch 34: Train mse = 0.6584 | Val mse = 0.6056
Epoch 35: Train mse = 0.6546 | Val mse = 0.6256
Epoch 36: Train mse = 0.6551 | Val mse = 0.5959
Epoch 37: Train mse = 0.6400 | Val mse = 0.6252
Early stopping triggered at epoch 37.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.04923403
1 day(s) RMSE                      : 27.56547182
1 day(s) R2                        : -0.01018301
1 day(s) Pearson r                 : 0.03514451
1 day(s) QLIKE                     : 0.67410237
3 day(s) MAE                       : 4.10956383
3 day(s) RMSE                      : 27.57387046
3 day(s) R2                        : -0.01085683
3 day(s) Pearson r                 : 0.02069674
3 day(s) QLIKE                     : 0.68865757
5 day(s) MAE                       : 4.13914545
5 day(s) RMSE                      : 27.58162556
5 day(s) R2                        : -0.01148344
5 day(s) Pearson r                 : 0.01110960
5 day(s) QLIKE                     : 0.69827925
10 day(s) MAE                      : 4.21873974
10 day(s) RMSE                     : 27.58894738
10 day(s) R2                       : -0.01187202
10 day(s) Pearson r                : 0.00180227
10 day(s) QLIKE                    : 0.70785386
20 day(s) MAE                      : 4.29949877
20 day(s) RMSE                     : 27.59103744
20 day(s) R2                       : -0.01197179
20 day(s) Pearson r                : -0.00881711
20 day(s) QLIKE                    : 0.71098707
full horizon MAE                   : 4.29949877
full horizon RMSE                  : 27.59103744
full horizon R2                    : -0.01197179
full horizon Pearson r             : -0.00881711
full horizon QLIKE                 : 0.71098707

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/Simple_KAN_H20.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=2.02259, max=12.4522

=== BAC | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.10774278654742
  Min value:  -3.152671373228803
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2542888447212466
  Min value:  -1.29240155253667
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.207108669727663
  Min value:  -1.9461535420618146
Epoch 1: Train mse = 0.8978 | Val mse = 0.4862
Epoch 2: Train mse = 0.6703 | Val mse = 0.5709
Epoch 3: Train mse = 0.5363 | Val mse = 0.5123
Epoch 4: Train mse = 0.4982 | Val mse = 0.4936
Epoch 5: Train mse = 0.4467 | Val mse = 0.5356
Epoch 6: Train mse = 0.4204 | Val mse = 0.5009
Epoch 7: Train mse = 0.4047 | Val mse = 0.4902
Epoch 8: Train mse = 0.3820 | Val mse = 0.4955
Epoch 9: Train mse = 0.3728 | Val mse = 0.4614
Epoch 10: Train mse = 0.3750 | Val mse = 0.4587
Epoch 11: Train mse = 0.3576 | Val mse = 0.5482
Epoch 12: Train mse = 0.3616 | Val mse = 0.4538
Epoch 13: Train mse = 0.3497 | Val mse = 0.4734
Epoch 14: Train mse = 0.3317 | Val mse = 0.4995
Epoch 15: Train mse = 0.3171 | Val mse = 0.5033
Epoch 16: Train mse = 0.3102 | Val mse = 0.5365
Epoch 17: Train mse = 0.2975 | Val mse = 0.5563
Epoch 18: Train mse = 0.2860 | Val mse = 0.5437
Epoch 19: Train mse = 0.2777 | Val mse = 0.5784
Epoch 20: Train mse = 0.2746 | Val mse = 0.5386
Epoch 21: Train mse = 0.2567 | Val mse = 0.5776
Epoch 22: Train mse = 0.2494 | Val mse = 0.5938
Epoch 23: Train mse = 0.2427 | Val mse = 0.5651
Epoch 24: Train mse = 0.2156 | Val mse = 0.5514
Epoch 25: Train mse = 0.2145 | Val mse = 0.5837
Epoch 26: Train mse = 0.2208 | Val mse = 0.5175
Epoch 27: Train mse = 0.1894 | Val mse = 0.6090
Epoch 28: Train mse = 0.1766 | Val mse = 0.5857
Epoch 29: Train mse = 0.1776 | Val mse = 0.6595
Epoch 30: Train mse = 0.1796 | Val mse = 0.6054
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.32248551
1 day(s) RMSE                      : 7.40503532
1 day(s) R2                        : 0.09002190
1 day(s) Pearson r                 : 0.38391358
1 day(s) QLIKE                     : 0.27749442
full horizon MAE                   : 2.32248551
full horizon RMSE                  : 7.40503532
full horizon R2                    : 0.09002190
full horizon Pearson r             : 0.38391358
full horizon QLIKE                 : 0.27749442

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/Simple_KAN_H1.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=0.80017, max=56.1842

=== BAC | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.106232115580275
  Min value:  -3.150569092336922
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.253588840112375
  Min value:  -1.2911129191419919
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.205991919409497
  Min value:  -1.9445789696790647
Epoch 1: Train mse = 0.9229 | Val mse = 0.4848
Epoch 2: Train mse = 0.7946 | Val mse = 0.5313
Epoch 3: Train mse = 0.6608 | Val mse = 0.5518
Epoch 4: Train mse = 0.6019 | Val mse = 0.5543
Epoch 5: Train mse = 0.5634 | Val mse = 0.5792
Epoch 6: Train mse = 0.5244 | Val mse = 0.5126
Epoch 7: Train mse = 0.5315 | Val mse = 0.4872
Epoch 8: Train mse = 0.5102 | Val mse = 0.5598
Epoch 9: Train mse = 0.4969 | Val mse = 0.5416
Epoch 10: Train mse = 0.4916 | Val mse = 0.5363
Epoch 11: Train mse = 0.4810 | Val mse = 0.5590
Epoch 12: Train mse = 0.4741 | Val mse = 0.4990
Epoch 13: Train mse = 0.4657 | Val mse = 0.5009
Epoch 14: Train mse = 0.4567 | Val mse = 0.5197
Epoch 15: Train mse = 0.4521 | Val mse = 0.5224
Epoch 16: Train mse = 0.4457 | Val mse = 0.4972
Epoch 17: Train mse = 0.4383 | Val mse = 0.4982
Epoch 18: Train mse = 0.4282 | Val mse = 0.5100
Epoch 19: Train mse = 0.4204 | Val mse = 0.5303
Epoch 20: Train mse = 0.4127 | Val mse = 0.5008
Epoch 21: Train mse = 0.4018 | Val mse = 0.5166
Epoch 22: Train mse = 0.3990 | Val mse = 0.5108
Epoch 23: Train mse = 0.3883 | Val mse = 0.5325
Epoch 24: Train mse = 0.3789 | Val mse = 0.4969
Epoch 25: Train mse = 0.3847 | Val mse = 0.5318
Epoch 26: Train mse = 0.3898 | Val mse = 0.4996
Epoch 27: Train mse = 0.3729 | Val mse = 0.5484
Epoch 28: Train mse = 0.3580 | Val mse = 0.5838
Epoch 29: Train mse = 0.3552 | Val mse = 0.5467
Epoch 30: Train mse = 0.3385 | Val mse = 0.5054
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.21178126
1 day(s) RMSE                      : 7.74989853
1 day(s) R2                        : 0.00329026
1 day(s) Pearson r                 : 0.27091580
1 day(s) QLIKE                     : 0.39295049
3 day(s) MAE                       : 2.21331913
3 day(s) RMSE                      : 7.78556445
3 day(s) R2                        : -0.00526251
3 day(s) Pearson r                 : 0.16470286
3 day(s) QLIKE                     : 0.40670693
5 day(s) MAE                       : 2.22979695
5 day(s) RMSE                      : 7.80706778
5 day(s) R2                        : -0.01051084
5 day(s) Pearson r                 : 0.10295889
5 day(s) QLIKE                     : 0.41657109
full horizon MAE                   : 2.22979695
full horizon RMSE                  : 7.80706778
full horizon R2                    : -0.01051084
full horizon Pearson r             : 0.10295889
full horizon QLIKE                 : 0.41657109

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/Simple_KAN_H5.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.27473, max=7.08089

=== BAC | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.1031963312484585
  Min value:  -3.149203897636293
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.251540537979127
  Min value:  -1.2907388380002232
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203435974440135
  Min value:  -1.9438565829024124
Epoch 1: Train mse = 0.9343 | Val mse = 0.4859
Epoch 2: Train mse = 0.8449 | Val mse = 0.5617
Epoch 3: Train mse = 0.7141 | Val mse = 0.5591
Epoch 4: Train mse = 0.6655 | Val mse = 0.5290
Epoch 5: Train mse = 0.6293 | Val mse = 0.6302
Epoch 6: Train mse = 0.6036 | Val mse = 0.5290
Epoch 7: Train mse = 0.5909 | Val mse = 0.5108
Epoch 8: Train mse = 0.5759 | Val mse = 0.5317
Epoch 9: Train mse = 0.5658 | Val mse = 0.5797
Epoch 10: Train mse = 0.5533 | Val mse = 0.5438
Epoch 11: Train mse = 0.5481 | Val mse = 0.5641
Epoch 12: Train mse = 0.5416 | Val mse = 0.5363
Epoch 13: Train mse = 0.5307 | Val mse = 0.5050
Epoch 14: Train mse = 0.5283 | Val mse = 0.5586
Epoch 15: Train mse = 0.5221 | Val mse = 0.5655
Epoch 16: Train mse = 0.5168 | Val mse = 0.5329
Epoch 17: Train mse = 0.5098 | Val mse = 0.5281
Epoch 18: Train mse = 0.4979 | Val mse = 0.5347
Epoch 19: Train mse = 0.4855 | Val mse = 0.5369
Epoch 20: Train mse = 0.4798 | Val mse = 0.5124
Epoch 21: Train mse = 0.4741 | Val mse = 0.5054
Epoch 22: Train mse = 0.4647 | Val mse = 0.4886
Epoch 23: Train mse = 0.4540 | Val mse = 0.5257
Epoch 24: Train mse = 0.4482 | Val mse = 0.5107
Epoch 25: Train mse = 0.4507 | Val mse = 0.5584
Epoch 26: Train mse = 0.4668 | Val mse = 0.5286
Epoch 27: Train mse = 0.4434 | Val mse = 0.6255
Epoch 28: Train mse = 0.4282 | Val mse = 0.5851
Epoch 29: Train mse = 0.4151 | Val mse = 0.5653
Epoch 30: Train mse = 0.4042 | Val mse = 0.5387
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.22964787
1 day(s) RMSE                      : 7.75387118
1 day(s) R2                        : 0.00226816
1 day(s) Pearson r                 : 0.26805620
1 day(s) QLIKE                     : 0.39586678
3 day(s) MAE                       : 2.22744967
3 day(s) RMSE                      : 7.78856867
3 day(s) R2                        : -0.00603846
3 day(s) Pearson r                 : 0.14812849
3 day(s) QLIKE                     : 0.40925825
5 day(s) MAE                       : 2.23816100
5 day(s) RMSE                      : 7.80838423
5 day(s) R2                        : -0.01085166
5 day(s) Pearson r                 : 0.09417730
5 day(s) QLIKE                     : 0.41786883
10 day(s) MAE                      : 2.24588891
10 day(s) RMSE                     : 7.83285846
10 day(s) R2                       : -0.01710181
10 day(s) Pearson r                : 0.03276458
10 day(s) QLIKE                    : 0.42578014
full horizon MAE                   : 2.24588891
full horizon RMSE                  : 7.83285846
full horizon R2                    : -0.01710181
full horizon Pearson r             : 0.03276458
full horizon QLIKE                 : 0.42578014

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/Simple_KAN_H10.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.23526, max=6.88173

=== BAC | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103268362353564
  Min value:  -3.1487890186062457
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.251689496563368
  Min value:  -1.290401169341898
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203545386302683
  Min value:  -1.9434917803124587
Epoch 1: Train mse = 0.9506 | Val mse = 0.4977
Epoch 2: Train mse = 0.8777 | Val mse = 0.5899
Epoch 3: Train mse = 0.7766 | Val mse = 0.5897
Epoch 4: Train mse = 0.7361 | Val mse = 0.5351
Epoch 5: Train mse = 0.7135 | Val mse = 0.5881
Epoch 6: Train mse = 0.6741 | Val mse = 0.5655
Epoch 7: Train mse = 0.6729 | Val mse = 0.5181
Epoch 8: Train mse = 0.6633 | Val mse = 0.5430
Epoch 9: Train mse = 0.6487 | Val mse = 0.5723
Epoch 10: Train mse = 0.6414 | Val mse = 0.5718
Epoch 11: Train mse = 0.6280 | Val mse = 0.5786
Epoch 12: Train mse = 0.6239 | Val mse = 0.5517
Epoch 13: Train mse = 0.6088 | Val mse = 0.5575
Epoch 14: Train mse = 0.6045 | Val mse = 0.5769
Epoch 15: Train mse = 0.5963 | Val mse = 0.5609
Epoch 16: Train mse = 0.5878 | Val mse = 0.5353
Epoch 17: Train mse = 0.5850 | Val mse = 0.5131
Epoch 18: Train mse = 0.5714 | Val mse = 0.5057
Epoch 19: Train mse = 0.5608 | Val mse = 0.5275
Epoch 20: Train mse = 0.5567 | Val mse = 0.4912
Epoch 21: Train mse = 0.5361 | Val mse = 0.5721
Epoch 22: Train mse = 0.5389 | Val mse = 0.5015
Epoch 23: Train mse = 0.5166 | Val mse = 0.5074
Epoch 24: Train mse = 0.4981 | Val mse = 0.5205
Epoch 25: Train mse = 0.4874 | Val mse = 0.5096
Epoch 26: Train mse = 0.4730 | Val mse = 0.5303
Epoch 27: Train mse = 0.4652 | Val mse = 0.5051
Epoch 28: Train mse = 0.4509 | Val mse = 0.5176
Epoch 29: Train mse = 0.4414 | Val mse = 0.4946
Epoch 30: Train mse = 0.4315 | Val mse = 0.5135
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.31864956
1 day(s) RMSE                      : 7.58831995
1 day(s) R2                        : 0.04441805
1 day(s) Pearson r                 : 0.35499284
1 day(s) QLIKE                     : 0.33021818
3 day(s) MAE                       : 2.57927904
3 day(s) RMSE                      : 8.67198819
3 day(s) R2                        : -0.24720152
3 day(s) Pearson r                 : 0.24534090
3 day(s) QLIKE                     : 0.39520204
5 day(s) MAE                       : 2.87147396
5 day(s) RMSE                      : 9.94869126
5 day(s) R2                        : -0.64095618
5 day(s) Pearson r                 : 0.14366147
5 day(s) QLIKE                     : 0.43984295
10 day(s) MAE                      : 3.26391040
10 day(s) RMSE                     : 11.98434117
10 day(s) R2                       : -1.38096053
10 day(s) Pearson r                : 0.05481245
10 day(s) QLIKE                    : 0.48681341
20 day(s) MAE                      : 3.15080940
20 day(s) RMSE                     : 10.81349373
20 day(s) R2                       : -0.93909450
20 day(s) Pearson r                : 0.02708136
20 day(s) QLIKE                    : 0.48864557
full horizon MAE                   : 3.15080940
full horizon RMSE                  : 10.81349373
full horizon R2                    : -0.93909450
full horizon Pearson r             : 0.02708136
full horizon QLIKE                 : 0.48864557

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/Simple_KAN_H20.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.56557, max=151.614

=== C | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.116803264609633
  Min value:  -2.5790852911299234
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6474166417153135
  Min value:  -1.663048077620065
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.113246907180982
  Min value:  -2.058904827000899
Epoch 1: Train mse = 0.8934 | Val mse = 0.4770
Epoch 2: Train mse = 0.7021 | Val mse = 0.4134
Epoch 3: Train mse = 0.5647 | Val mse = 0.4388
Epoch 4: Train mse = 0.4812 | Val mse = 0.3935
Epoch 5: Train mse = 0.4676 | Val mse = 0.4134
Epoch 6: Train mse = 0.4242 | Val mse = 0.4182
Epoch 7: Train mse = 0.4053 | Val mse = 0.3837
Epoch 8: Train mse = 0.3800 | Val mse = 0.3785
Epoch 9: Train mse = 0.3758 | Val mse = 0.4042
Epoch 10: Train mse = 0.3614 | Val mse = 0.4045
Epoch 11: Train mse = 0.3495 | Val mse = 0.4181
Epoch 12: Train mse = 0.3449 | Val mse = 0.4031
Epoch 13: Train mse = 0.3367 | Val mse = 0.3805
Epoch 14: Train mse = 0.3307 | Val mse = 0.3992
Epoch 15: Train mse = 0.3288 | Val mse = 0.4106
Epoch 16: Train mse = 0.3132 | Val mse = 0.3886
Epoch 17: Train mse = 0.3093 | Val mse = 0.3946
Epoch 18: Train mse = 0.3016 | Val mse = 0.3994
Epoch 19: Train mse = 0.2950 | Val mse = 0.3972
Epoch 20: Train mse = 0.2873 | Val mse = 0.4031
Epoch 21: Train mse = 0.2761 | Val mse = 0.3991
Epoch 22: Train mse = 0.2696 | Val mse = 0.4205
Epoch 23: Train mse = 0.2997 | Val mse = 0.4034
Epoch 24: Train mse = 0.2667 | Val mse = 0.4115
Epoch 25: Train mse = 0.2584 | Val mse = 0.4457
Epoch 26: Train mse = 0.2516 | Val mse = 0.4219
Epoch 27: Train mse = 0.2286 | Val mse = 0.4387
Epoch 28: Train mse = 0.2222 | Val mse = 0.4481
Epoch 29: Train mse = 0.2098 | Val mse = 0.4753
Epoch 30: Train mse = 0.1959 | Val mse = 0.4796
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.69971305
1 day(s) RMSE                      : 8.42544081
1 day(s) R2                        : 0.20324468
1 day(s) Pearson r                 : 0.46558478
1 day(s) QLIKE                     : 0.28112754
full horizon MAE                   : 2.69971305
full horizon RMSE                  : 8.42544081
full horizon R2                    : 0.20324468
full horizon Pearson r             : 0.46558478
full horizon QLIKE                 : 0.28112754

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/Simple_KAN_H1.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.53587, max=39.3556

=== C | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.113808938949674
  Min value:  -2.5779301635708127
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6457537548096046
  Min value:  -1.6623868571045473
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.110793676954993
  Min value:  -2.05803016926483
Epoch 1: Train mse = 0.9145 | Val mse = 0.4511
Epoch 2: Train mse = 0.7911 | Val mse = 0.4429
Epoch 3: Train mse = 0.6296 | Val mse = 0.4850
Epoch 4: Train mse = 0.5652 | Val mse = 0.4610
Epoch 5: Train mse = 0.5333 | Val mse = 0.4873
Epoch 6: Train mse = 0.4942 | Val mse = 0.4365
Epoch 7: Train mse = 0.4924 | Val mse = 0.4056
Epoch 8: Train mse = 0.4796 | Val mse = 0.4272
Epoch 9: Train mse = 0.4636 | Val mse = 0.4503
Epoch 10: Train mse = 0.4537 | Val mse = 0.4408
Epoch 11: Train mse = 0.4469 | Val mse = 0.4622
Epoch 12: Train mse = 0.4418 | Val mse = 0.4640
Epoch 13: Train mse = 0.4339 | Val mse = 0.4271
Epoch 14: Train mse = 0.4291 | Val mse = 0.4318
Epoch 15: Train mse = 0.4237 | Val mse = 0.4523
Epoch 16: Train mse = 0.4182 | Val mse = 0.4423
Epoch 17: Train mse = 0.4132 | Val mse = 0.4389
Epoch 18: Train mse = 0.4071 | Val mse = 0.4372
Epoch 19: Train mse = 0.4021 | Val mse = 0.4270
Epoch 20: Train mse = 0.3983 | Val mse = 0.4397
Epoch 21: Train mse = 0.3996 | Val mse = 0.4337
Epoch 22: Train mse = 0.3856 | Val mse = 0.4485
Epoch 23: Train mse = 0.3824 | Val mse = 0.4418
Epoch 24: Train mse = 0.3710 | Val mse = 0.4582
Epoch 25: Train mse = 0.3753 | Val mse = 0.4567
Epoch 26: Train mse = 0.3766 | Val mse = 0.4451
Epoch 27: Train mse = 0.3697 | Val mse = 0.4480
Epoch 28: Train mse = 0.3598 | Val mse = 0.5210
Epoch 29: Train mse = 0.3576 | Val mse = 0.4584
Epoch 30: Train mse = 0.3352 | Val mse = 0.4600
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.63411755
1 day(s) RMSE                      : 8.64160279
1 day(s) R2                        : 0.16183734
1 day(s) Pearson r                 : 0.46474565
1 day(s) QLIKE                     : 0.29384145
3 day(s) MAE                       : 2.76023733
3 day(s) RMSE                      : 8.92380650
3 day(s) R2                        : 0.11055911
3 day(s) Pearson r                 : 0.36610962
3 day(s) QLIKE                     : 0.33343703
5 day(s) MAE                       : 2.86213265
5 day(s) RMSE                      : 9.19555339
5 day(s) R2                        : 0.05857048
5 day(s) Pearson r                 : 0.27734426
5 day(s) QLIKE                     : 0.36911791
full horizon MAE                   : 2.86213265
full horizon RMSE                  : 9.19555339
full horizon R2                    : 0.05857048
full horizon Pearson r             : 0.27734426
full horizon QLIKE                 : 0.36911791

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/Simple_KAN_H5.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.8951, max=33.1391

=== C | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.111052537752696
  Min value:  -2.577782485226596
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6439291882044227
  Min value:  -1.662584849687275
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.1084159724432565
  Min value:  -2.0580787834517045
Epoch 1: Train mse = 0.9255 | Val mse = 0.4429
Epoch 2: Train mse = 0.8107 | Val mse = 0.4726
Epoch 3: Train mse = 0.6696 | Val mse = 0.5076
Epoch 4: Train mse = 0.6173 | Val mse = 0.4688
Epoch 5: Train mse = 0.5826 | Val mse = 0.5283
Epoch 6: Train mse = 0.5585 | Val mse = 0.4594
Epoch 7: Train mse = 0.5401 | Val mse = 0.4350
Epoch 8: Train mse = 0.5290 | Val mse = 0.4529
Epoch 9: Train mse = 0.5192 | Val mse = 0.4883
Epoch 10: Train mse = 0.5077 | Val mse = 0.4624
Epoch 11: Train mse = 0.4996 | Val mse = 0.4940
Epoch 12: Train mse = 0.4966 | Val mse = 0.4758
Epoch 13: Train mse = 0.4857 | Val mse = 0.4617
Epoch 14: Train mse = 0.4825 | Val mse = 0.4810
Epoch 15: Train mse = 0.4748 | Val mse = 0.5032
Epoch 16: Train mse = 0.4687 | Val mse = 0.4697
Epoch 17: Train mse = 0.4664 | Val mse = 0.4634
Epoch 18: Train mse = 0.4594 | Val mse = 0.4631
Epoch 19: Train mse = 0.4548 | Val mse = 0.4696
Epoch 20: Train mse = 0.4469 | Val mse = 0.4658
Epoch 21: Train mse = 0.4468 | Val mse = 0.4658
Epoch 22: Train mse = 0.4363 | Val mse = 0.4737
Epoch 23: Train mse = 0.4282 | Val mse = 0.4809
Epoch 24: Train mse = 0.4204 | Val mse = 0.5014
Epoch 25: Train mse = 0.4296 | Val mse = 0.4796
Epoch 26: Train mse = 0.4359 | Val mse = 0.5006
Epoch 27: Train mse = 0.4317 | Val mse = 0.5318
Epoch 28: Train mse = 0.4155 | Val mse = 0.5049
Epoch 29: Train mse = 0.4037 | Val mse = 0.4711
Epoch 30: Train mse = 0.3969 | Val mse = 0.4988
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.61040396
1 day(s) RMSE                      : 8.60602532
1 day(s) R2                        : 0.16872457
1 day(s) Pearson r                 : 0.45305860
1 day(s) QLIKE                     : 0.29901361
3 day(s) MAE                       : 2.74730117
3 day(s) RMSE                      : 8.93349626
3 day(s) R2                        : 0.10862649
3 day(s) Pearson r                 : 0.36409745
3 day(s) QLIKE                     : 0.34102152
5 day(s) MAE                       : 2.85207451
5 day(s) RMSE                      : 9.21651563
5 day(s) R2                        : 0.05427341
5 day(s) Pearson r                 : 0.28042244
5 day(s) QLIKE                     : 0.37606638
10 day(s) MAE                      : 3.01836111
10 day(s) RMSE                     : 9.55594375
10 day(s) R2                       : -0.01429516
10 day(s) Pearson r                : 0.16180151
10 day(s) QLIKE                    : 0.42127070
full horizon MAE                   : 3.01836111
full horizon RMSE                  : 9.55594375
full horizon R2                    : -0.01429516
full horizon Pearson r             : 0.16180151
full horizon QLIKE                 : 0.42127070

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/Simple_KAN_H10.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.66803, max=39.8535

=== C | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.111456003225553
  Min value:  -2.5798794252140986
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.643530346347736
  Min value:  -1.664384167842813
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.1084933809861415
  Min value:  -2.0600067160447444
Epoch 1: Train mse = 0.9430 | Val mse = 0.4421
Epoch 2: Train mse = 0.8323 | Val mse = 0.4982
Epoch 3: Train mse = 0.7310 | Val mse = 0.5381
Epoch 4: Train mse = 0.6875 | Val mse = 0.4780
Epoch 5: Train mse = 0.6672 | Val mse = 0.5002
Epoch 6: Train mse = 0.6252 | Val mse = 0.5065
Epoch 7: Train mse = 0.6250 | Val mse = 0.4756
Epoch 8: Train mse = 0.6208 | Val mse = 0.4754
Epoch 9: Train mse = 0.6014 | Val mse = 0.5105
Epoch 10: Train mse = 0.5933 | Val mse = 0.5182
Epoch 11: Train mse = 0.5836 | Val mse = 0.5187
Epoch 12: Train mse = 0.5837 | Val mse = 0.5041
Epoch 13: Train mse = 0.5667 | Val mse = 0.5189
Epoch 14: Train mse = 0.5694 | Val mse = 0.5423
Epoch 15: Train mse = 0.5603 | Val mse = 0.5298
Epoch 16: Train mse = 0.5554 | Val mse = 0.5260
Epoch 17: Train mse = 0.5499 | Val mse = 0.5366
Epoch 18: Train mse = 0.5516 | Val mse = 0.5128
Epoch 19: Train mse = 0.5412 | Val mse = 0.5055
Epoch 20: Train mse = 0.5353 | Val mse = 0.5497
Epoch 21: Train mse = 0.5370 | Val mse = 0.5317
Epoch 22: Train mse = 0.5241 | Val mse = 0.5167
Epoch 23: Train mse = 0.5202 | Val mse = 0.5362
Epoch 24: Train mse = 0.5095 | Val mse = 0.5607
Epoch 25: Train mse = 0.5063 | Val mse = 0.5295
Epoch 26: Train mse = 0.5099 | Val mse = 0.5136
Epoch 27: Train mse = 0.5000 | Val mse = 0.5654
Epoch 28: Train mse = 0.4967 | Val mse = 0.4970
Epoch 29: Train mse = 0.4977 | Val mse = 0.5396
Epoch 30: Train mse = 0.4881 | Val mse = 0.5073
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.77421305
1 day(s) RMSE                      : 9.49832408
1 day(s) R2                        : -0.01259009
1 day(s) Pearson r                 : 0.32541167
1 day(s) QLIKE                     : 0.40150704
3 day(s) MAE                       : 2.78338282
3 day(s) RMSE                      : 9.56612407
3 day(s) R2                        : -0.02208925
3 day(s) Pearson r                 : 0.19829641
3 day(s) QLIKE                     : 0.40910354
5 day(s) MAE                       : 2.81320025
5 day(s) RMSE                      : 9.60148133
5 day(s) R2                        : -0.02638089
5 day(s) Pearson r                 : 0.15886653
5 day(s) QLIKE                     : 0.41772220
10 day(s) MAE                      : 2.84345267
10 day(s) RMSE                     : 9.64550828
10 day(s) R2                       : -0.03339753
10 day(s) Pearson r                : 0.08866893
10 day(s) QLIKE                    : 0.42674628
20 day(s) MAE                      : 2.86892442
20 day(s) RMSE                     : 9.66627907
20 day(s) R2                       : -0.03703086
20 day(s) Pearson r                : 0.04622948
20 day(s) QLIKE                    : 0.42970293
full horizon MAE                   : 2.86892442
full horizon RMSE                  : 9.66627907
full horizon R2                    : -0.03703086
full horizon Pearson r             : 0.04622948
full horizon QLIKE                 : 0.42970293

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/Simple_KAN_H20.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=2.48092, max=6.95947

=== BTCUSDT | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.551349524903821
  Min value:  -3.6053341677678
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.344819091237707
  Min value:  -4.24132665249135
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.58025865739623
  Min value:  -2.56748600221835
Epoch 1: Train mse = 0.9235 | Val mse = 1.2028
Epoch 2: Train mse = 0.6479 | Val mse = 0.9519
Epoch 3: Train mse = 0.5070 | Val mse = 0.8800
Epoch 4: Train mse = 0.4900 | Val mse = 0.8065
Epoch 5: Train mse = 0.4271 | Val mse = 0.8932
Epoch 6: Train mse = 0.4173 | Val mse = 0.7495
Epoch 7: Train mse = 0.3613 | Val mse = 0.7307
Epoch 8: Train mse = 0.3369 | Val mse = 0.7459
Epoch 9: Train mse = 0.3381 | Val mse = 0.7291
Epoch 10: Train mse = 0.3129 | Val mse = 0.7202
Epoch 11: Train mse = 0.3046 | Val mse = 0.7151
Epoch 12: Train mse = 0.2799 | Val mse = 0.7070
Epoch 13: Train mse = 0.2641 | Val mse = 0.7117
Epoch 14: Train mse = 0.2419 | Val mse = 0.7241
Epoch 15: Train mse = 0.2147 | Val mse = 0.7431
Epoch 16: Train mse = 0.2038 | Val mse = 0.7817
Epoch 17: Train mse = 0.1931 | Val mse = 0.8258
Epoch 18: Train mse = 0.2053 | Val mse = 0.7863
Epoch 19: Train mse = 0.1851 | Val mse = 0.7572
Epoch 20: Train mse = 0.1786 | Val mse = 0.7703
Epoch 21: Train mse = 0.1487 | Val mse = 0.8596
Epoch 22: Train mse = 0.1359 | Val mse = 0.8523
Epoch 23: Train mse = 0.1080 | Val mse = 0.9304
Epoch 24: Train mse = 0.0964 | Val mse = 0.9180
Epoch 25: Train mse = 0.0865 | Val mse = 0.9767
Epoch 26: Train mse = 0.0717 | Val mse = 0.9699
Epoch 27: Train mse = 0.0667 | Val mse = 0.9687
Epoch 28: Train mse = 0.0649 | Val mse = 0.9666
Epoch 29: Train mse = 0.0665 | Val mse = 1.0709
Epoch 30: Train mse = 0.0489 | Val mse = 1.1359
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 3.93152107
1 day(s) RMSE                      : 8.65436862
1 day(s) R2                        : 0.11238883
1 day(s) Pearson r                 : 0.41053959
1 day(s) QLIKE                     : 0.24344526
full horizon MAE                   : 3.93152107
full horizon RMSE                  : 8.65436862
full horizon R2                    : 0.11238883
full horizon Pearson r             : 0.41053959
full horizon QLIKE                 : 0.24344526

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/Simple_KAN_H1.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=0.778153, max=68.0627

=== BTCUSDT | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.554196812284146
  Min value:  -3.6087233634991427
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3459792971639226
  Min value:  -4.245202118935102
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.58159887703065
  Min value:  -2.5700816741597805
Epoch 1: Train mse = 0.9471 | Val mse = 1.4447
Epoch 2: Train mse = 0.7659 | Val mse = 1.1271
Epoch 3: Train mse = 0.6611 | Val mse = 1.0491
Epoch 4: Train mse = 0.6029 | Val mse = 0.9739
Epoch 5: Train mse = 0.5672 | Val mse = 1.0189
Epoch 6: Train mse = 0.5551 | Val mse = 0.9822
Epoch 7: Train mse = 0.5193 | Val mse = 0.9328
Epoch 8: Train mse = 0.4999 | Val mse = 0.9773
Epoch 9: Train mse = 0.4910 | Val mse = 0.9698
Epoch 10: Train mse = 0.4829 | Val mse = 1.0214
Epoch 11: Train mse = 0.4653 | Val mse = 1.0571
Epoch 12: Train mse = 0.4379 | Val mse = 1.1345
Epoch 13: Train mse = 0.4166 | Val mse = 1.0971
Epoch 14: Train mse = 0.3973 | Val mse = 1.0834
Epoch 15: Train mse = 0.3717 | Val mse = 1.0841
Epoch 16: Train mse = 0.3666 | Val mse = 1.0910
Epoch 17: Train mse = 0.3524 | Val mse = 1.0773
Epoch 18: Train mse = 0.3468 | Val mse = 1.0857
Epoch 19: Train mse = 0.3241 | Val mse = 1.0912
Epoch 20: Train mse = 0.2962 | Val mse = 1.2733
Epoch 21: Train mse = 0.2741 | Val mse = 1.1980
Epoch 22: Train mse = 0.2535 | Val mse = 1.3270
Epoch 23: Train mse = 0.2422 | Val mse = 1.0998
Epoch 24: Train mse = 0.2295 | Val mse = 1.2154
Epoch 25: Train mse = 0.2095 | Val mse = 1.2052
Epoch 26: Train mse = 0.1952 | Val mse = 1.1485
Epoch 27: Train mse = 0.1905 | Val mse = 1.1731
Epoch 28: Train mse = 0.1788 | Val mse = 1.3124
Epoch 29: Train mse = 0.1771 | Val mse = 1.2596
Epoch 30: Train mse = 0.1661 | Val mse = 1.3182
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.17566045
1 day(s) RMSE                      : 8.82504886
1 day(s) R2                        : 0.07703290
1 day(s) Pearson r                 : 0.33279890
1 day(s) QLIKE                     : 0.30199962
3 day(s) MAE                       : 4.30681087
3 day(s) RMSE                      : 9.00230834
3 day(s) R2                        : 0.03971627
3 day(s) Pearson r                 : 0.28154969
3 day(s) QLIKE                     : 0.32797879
5 day(s) MAE                       : 4.44853271
5 day(s) RMSE                      : 9.15721920
5 day(s) R2                        : 0.00607141
5 day(s) Pearson r                 : 0.23119251
5 day(s) QLIKE                     : 0.34783235
full horizon MAE                   : 4.44853271
full horizon RMSE                  : 9.15721920
full horizon R2                    : 0.00607141
full horizon Pearson r             : 0.23119251
full horizon QLIKE                 : 0.34783235

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/Simple_KAN_H5.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.33678, max=41.7041

=== BTCUSDT | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.556915178048552
  Min value:  -3.6122923012225283
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.346996833643509
  Min value:  -4.249261289934421
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.582797894129229
  Min value:  -2.572850621760107
Epoch 1: Train mse = 0.9606 | Val mse = 1.4663
Epoch 2: Train mse = 0.8049 | Val mse = 1.2462
Epoch 3: Train mse = 0.7338 | Val mse = 1.0203
Epoch 4: Train mse = 0.6791 | Val mse = 0.9961
Epoch 5: Train mse = 0.6559 | Val mse = 1.0150
Epoch 6: Train mse = 0.6332 | Val mse = 1.0387
Epoch 7: Train mse = 0.6097 | Val mse = 1.0696
Epoch 8: Train mse = 0.5961 | Val mse = 1.0964
Epoch 9: Train mse = 0.5758 | Val mse = 1.0641
Epoch 10: Train mse = 0.5552 | Val mse = 1.1807
Epoch 11: Train mse = 0.5402 | Val mse = 1.2505
Epoch 12: Train mse = 0.5132 | Val mse = 1.2964
Epoch 13: Train mse = 0.4946 | Val mse = 1.2988
Epoch 14: Train mse = 0.4649 | Val mse = 1.4752
Epoch 15: Train mse = 0.4431 | Val mse = 1.3908
Epoch 16: Train mse = 0.4260 | Val mse = 1.4352
Epoch 17: Train mse = 0.4048 | Val mse = 1.3286
Epoch 18: Train mse = 0.4080 | Val mse = 1.4703
Epoch 19: Train mse = 0.3786 | Val mse = 1.2405
Epoch 20: Train mse = 0.3638 | Val mse = 1.6585
Epoch 21: Train mse = 0.3406 | Val mse = 1.3534
Epoch 22: Train mse = 0.3240 | Val mse = 1.7585
Epoch 23: Train mse = 0.3124 | Val mse = 1.3704
Epoch 24: Train mse = 0.2972 | Val mse = 1.3470
Epoch 25: Train mse = 0.2824 | Val mse = 1.5780
Epoch 26: Train mse = 0.2673 | Val mse = 1.5686
Epoch 27: Train mse = 0.2560 | Val mse = 1.6141
Epoch 28: Train mse = 0.2466 | Val mse = 1.2822
Epoch 29: Train mse = 0.2441 | Val mse = 1.4795
Epoch 30: Train mse = 0.2285 | Val mse = 1.4773
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.20146813
1 day(s) RMSE                      : 9.00430609
1 day(s) R2                        : 0.03915689
1 day(s) Pearson r                 : 0.30098692
1 day(s) QLIKE                     : 0.31415881
3 day(s) MAE                       : 4.33727568
3 day(s) RMSE                      : 9.17077969
3 day(s) R2                        : 0.00343800
3 day(s) Pearson r                 : 0.23962553
3 day(s) QLIKE                     : 0.34502595
5 day(s) MAE                       : 4.49111390
5 day(s) RMSE                      : 9.30045268
5 day(s) R2                        : -0.02526502
5 day(s) Pearson r                 : 0.18662915
5 day(s) QLIKE                     : 0.36325883
10 day(s) MAE                      : 4.64775187
10 day(s) RMSE                     : 9.41256698
10 day(s) R2                       : -0.04987986
10 day(s) Pearson r                : 0.12390179
10 day(s) QLIKE                    : 0.38363134
full horizon MAE                   : 4.64775187
full horizon RMSE                  : 9.41256698
full horizon R2                    : -0.04987986
full horizon Pearson r             : 0.12390179
full horizon QLIKE                 : 0.38363134

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/Simple_KAN_H10.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.7579, max=36.6398

=== BTCUSDT | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.564043013270673
  Min value:  -3.62021477149946
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3500532890780983
  Min value:  -4.258357261790038
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5862887708961586
  Min value:  -2.578858106420128
Epoch 1: Train mse = 0.9760 | Val mse = 1.5198
Epoch 2: Train mse = 0.8611 | Val mse = 1.3277
Epoch 3: Train mse = 0.8050 | Val mse = 1.0892
Epoch 4: Train mse = 0.7589 | Val mse = 1.1437
Epoch 5: Train mse = 0.7512 | Val mse = 1.0430
Epoch 6: Train mse = 0.7280 | Val mse = 1.0495
Epoch 7: Train mse = 0.7134 | Val mse = 1.0538
Epoch 8: Train mse = 0.7029 | Val mse = 1.1117
Epoch 9: Train mse = 0.6700 | Val mse = 1.0547
Epoch 10: Train mse = 0.6550 | Val mse = 1.1146
Epoch 11: Train mse = 0.6326 | Val mse = 1.2623
Epoch 12: Train mse = 0.6027 | Val mse = 1.3657
Epoch 13: Train mse = 0.5736 | Val mse = 1.2920
Epoch 14: Train mse = 0.5476 | Val mse = 1.6908
Epoch 15: Train mse = 0.5267 | Val mse = 1.5492
Epoch 16: Train mse = 0.5006 | Val mse = 1.5376
Epoch 17: Train mse = 0.4723 | Val mse = 1.5060
Epoch 18: Train mse = 0.4732 | Val mse = 1.7684
Epoch 19: Train mse = 0.4547 | Val mse = 1.3398
Epoch 20: Train mse = 0.4454 | Val mse = 1.5203
Epoch 21: Train mse = 0.4259 | Val mse = 1.5122
Epoch 22: Train mse = 0.4093 | Val mse = 1.8656
Epoch 23: Train mse = 0.3950 | Val mse = 1.3993
Epoch 24: Train mse = 0.3848 | Val mse = 1.5856
Epoch 25: Train mse = 0.3700 | Val mse = 1.7324
Epoch 26: Train mse = 0.3593 | Val mse = 1.5301
Epoch 27: Train mse = 0.3503 | Val mse = 1.5422
Epoch 28: Train mse = 0.3338 | Val mse = 1.7733
Epoch 29: Train mse = 0.3236 | Val mse = 1.4796
Epoch 30: Train mse = 0.3132 | Val mse = 1.6644
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.28724831
1 day(s) RMSE                      : 8.99237870
1 day(s) R2                        : 0.04170073
1 day(s) Pearson r                 : 0.28503098
1 day(s) QLIKE                     : 0.32748216
3 day(s) MAE                       : 4.43561079
3 day(s) RMSE                      : 9.14311266
3 day(s) R2                        : 0.00944192
3 day(s) Pearson r                 : 0.23069162
3 day(s) QLIKE                     : 0.35529200
5 day(s) MAE                       : 4.55708916
5 day(s) RMSE                      : 9.27004665
5 day(s) R2                        : -0.01857216
5 day(s) Pearson r                 : 0.18132558
5 day(s) QLIKE                     : 0.36918408
10 day(s) MAE                      : 4.70943971
10 day(s) RMSE                     : 9.39026995
10 day(s) R2                       : -0.04491172
10 day(s) Pearson r                : 0.11929866
10 day(s) QLIKE                    : 0.38873036
20 day(s) MAE                      : 4.84299542
20 day(s) RMSE                     : 9.45338177
20 day(s) R2                       : -0.05937868
20 day(s) Pearson r                : 0.05865985
20 day(s) QLIKE                    : 0.40306223
full horizon MAE                   : 4.84299542
full horizon RMSE                  : 9.45338177
full horizon R2                    : -0.05937868
full horizon Pearson r             : 0.05865985
full horizon QLIKE                 : 0.40306223

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/Simple_KAN_H20.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.31328, max=38.209

=== EURUSD | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9963965101732293
  Min value:  -3.718602223017603
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642504
  Min value:  -2.975928211620611
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316056
  Min value:  -5.349969967608034
Epoch 1: Train mse = 0.8552 | Val mse = 0.5148
Epoch 2: Train mse = 0.4654 | Val mse = 0.5382
Epoch 3: Train mse = 0.3364 | Val mse = 0.4245
Epoch 4: Train mse = 0.3098 | Val mse = 0.3963
Epoch 5: Train mse = 0.2670 | Val mse = 0.3614
Epoch 6: Train mse = 0.2696 | Val mse = 0.3310
Epoch 7: Train mse = 0.2246 | Val mse = 0.3302
Epoch 8: Train mse = 0.2088 | Val mse = 0.3298
Epoch 9: Train mse = 0.1922 | Val mse = 0.3256
Epoch 10: Train mse = 0.1692 | Val mse = 0.3253
Epoch 11: Train mse = 0.1390 | Val mse = 0.3361
Epoch 12: Train mse = 0.1196 | Val mse = 0.3468
Epoch 13: Train mse = 0.1091 | Val mse = 0.3398
Epoch 14: Train mse = 0.1056 | Val mse = 0.3715
Epoch 15: Train mse = 0.0949 | Val mse = 0.3587
Epoch 16: Train mse = 0.0845 | Val mse = 0.3576
Epoch 17: Train mse = 0.0738 | Val mse = 0.3568
Epoch 18: Train mse = 0.0680 | Val mse = 0.3751
Epoch 19: Train mse = 0.0606 | Val mse = 0.3882
Epoch 20: Train mse = 0.0543 | Val mse = 0.3840
Epoch 21: Train mse = 0.0454 | Val mse = 0.3846
Epoch 22: Train mse = 0.0406 | Val mse = 0.3861
Epoch 23: Train mse = 0.0383 | Val mse = 0.3884
Epoch 24: Train mse = 0.0332 | Val mse = 0.3752
Epoch 25: Train mse = 0.0279 | Val mse = 0.3594
Epoch 26: Train mse = 0.0250 | Val mse = 0.3621
Epoch 27: Train mse = 0.0291 | Val mse = 0.3533
Epoch 28: Train mse = 0.0263 | Val mse = 0.3678
Epoch 29: Train mse = 0.0236 | Val mse = 0.3498
Epoch 30: Train mse = 0.0172 | Val mse = 0.3707
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08409319
1 day(s) RMSE                      : 0.17190700
1 day(s) R2                        : 0.35254521
1 day(s) Pearson r                 : 0.61482931
1 day(s) QLIKE                     : 0.41026098
full horizon MAE                   : 0.08409319
full horizon RMSE                  : 0.17190700
full horizon R2                    : 0.35254521
full horizon Pearson r             : 0.61482931
full horizon QLIKE                 : 0.41026098

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/Simple_KAN_H1.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00386759, max=0.856328

=== EURUSD | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9970127449111077
  Min value:  -3.720451582171488
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319427111757353
  Min value:  -2.9775048778703237
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3838234229198263
  Min value:  -5.352418327671095
Epoch 1: Train mse = 0.9217 | Val mse = 0.8865
Epoch 2: Train mse = 0.7172 | Val mse = 0.6306
Epoch 3: Train mse = 0.5584 | Val mse = 0.5639
Epoch 4: Train mse = 0.4251 | Val mse = 0.4026
Epoch 5: Train mse = 0.3270 | Val mse = 0.3714
Epoch 6: Train mse = 0.2945 | Val mse = 0.3421
Epoch 7: Train mse = 0.2594 | Val mse = 0.3323
Epoch 8: Train mse = 0.2442 | Val mse = 0.3153
Epoch 9: Train mse = 0.2180 | Val mse = 0.3116
Epoch 10: Train mse = 0.2085 | Val mse = 0.3116
Epoch 11: Train mse = 0.1974 | Val mse = 0.3127
Epoch 12: Train mse = 0.1815 | Val mse = 0.3200
Epoch 13: Train mse = 0.1683 | Val mse = 0.3293
Epoch 14: Train mse = 0.1589 | Val mse = 0.3403
Epoch 15: Train mse = 0.1473 | Val mse = 0.3391
Epoch 16: Train mse = 0.1354 | Val mse = 0.3399
Epoch 17: Train mse = 0.1252 | Val mse = 0.3520
Epoch 18: Train mse = 0.1165 | Val mse = 0.3392
Epoch 19: Train mse = 0.1083 | Val mse = 0.3560
Epoch 20: Train mse = 0.1021 | Val mse = 0.3540
Epoch 21: Train mse = 0.0951 | Val mse = 0.3571
Epoch 22: Train mse = 0.0890 | Val mse = 0.3592
Epoch 23: Train mse = 0.0828 | Val mse = 0.3492
Epoch 24: Train mse = 0.0805 | Val mse = 0.3615
Epoch 25: Train mse = 0.0722 | Val mse = 0.3712
Epoch 26: Train mse = 0.0671 | Val mse = 0.3714
Epoch 27: Train mse = 0.0602 | Val mse = 0.3804
Epoch 28: Train mse = 0.0548 | Val mse = 0.3728
Epoch 29: Train mse = 0.0522 | Val mse = 0.3714
Epoch 30: Train mse = 0.0488 | Val mse = 0.3764
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08703391
1 day(s) RMSE                      : 0.16893710
1 day(s) R2                        : 0.37472306
1 day(s) Pearson r                 : 0.62124690
1 day(s) QLIKE                     : 0.37428720
3 day(s) MAE                       : 0.08887696
3 day(s) RMSE                      : 0.17914464
3 day(s) R2                        : 0.29145470
3 day(s) Pearson r                 : 0.55767703
3 day(s) QLIKE                     : 0.39093673
5 day(s) MAE                       : 0.09015884
5 day(s) RMSE                      : 0.18061165
5 day(s) R2                        : 0.27161871
5 day(s) Pearson r                 : 0.53882474
5 day(s) QLIKE                     : 0.42595533
full horizon MAE                   : 0.09015884
full horizon RMSE                  : 0.18061165
full horizon R2                    : 0.27161871
full horizon Pearson r             : 0.53882474
full horizon QLIKE                 : 0.42595533

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/Simple_KAN_H5.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00315231, max=1.03881

=== EURUSD | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.997326788566986
  Min value:  -3.719996871136433
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319755344427202
  Min value:  -2.977065724545838
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384150307095678
  Min value:  -5.351929442359286
Epoch 1: Train mse = 0.9301 | Val mse = 0.9332
Epoch 2: Train mse = 0.7702 | Val mse = 0.7102
Epoch 3: Train mse = 0.6160 | Val mse = 0.6323
Epoch 4: Train mse = 0.4921 | Val mse = 0.4819
Epoch 5: Train mse = 0.3856 | Val mse = 0.3959
Epoch 6: Train mse = 0.3316 | Val mse = 0.3693
Epoch 7: Train mse = 0.2890 | Val mse = 0.3461
Epoch 8: Train mse = 0.2733 | Val mse = 0.3035
Epoch 9: Train mse = 0.2455 | Val mse = 0.3194
Epoch 10: Train mse = 0.2372 | Val mse = 0.3167
Epoch 11: Train mse = 0.2316 | Val mse = 0.2999
Epoch 12: Train mse = 0.2178 | Val mse = 0.3152
Epoch 13: Train mse = 0.2050 | Val mse = 0.3142
Epoch 14: Train mse = 0.2016 | Val mse = 0.3234
Epoch 15: Train mse = 0.1880 | Val mse = 0.3236
Epoch 16: Train mse = 0.1796 | Val mse = 0.3334
Epoch 17: Train mse = 0.1725 | Val mse = 0.3344
Epoch 18: Train mse = 0.1686 | Val mse = 0.3447
Epoch 19: Train mse = 0.1642 | Val mse = 0.3279
Epoch 20: Train mse = 0.1547 | Val mse = 0.3394
Epoch 21: Train mse = 0.1495 | Val mse = 0.3366
Epoch 22: Train mse = 0.1457 | Val mse = 0.3446
Epoch 23: Train mse = 0.1377 | Val mse = 0.3410
Epoch 24: Train mse = 0.1359 | Val mse = 0.3438
Epoch 25: Train mse = 0.1269 | Val mse = 0.3437
Epoch 26: Train mse = 0.1203 | Val mse = 0.3566
Epoch 27: Train mse = 0.1157 | Val mse = 0.3580
Epoch 28: Train mse = 0.1066 | Val mse = 0.3642
Epoch 29: Train mse = 0.1052 | Val mse = 0.3533
Epoch 30: Train mse = 0.1000 | Val mse = 0.3636
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08865974
1 day(s) RMSE                      : 0.16836132
1 day(s) R2                        : 0.37897802
1 day(s) Pearson r                 : 0.62025950
1 day(s) QLIKE                     : 0.40133784
3 day(s) MAE                       : 0.09022202
3 day(s) RMSE                      : 0.17521972
3 day(s) R2                        : 0.32216196
3 day(s) Pearson r                 : 0.57517456
3 day(s) QLIKE                     : 0.40000221
5 day(s) MAE                       : 0.09250928
5 day(s) RMSE                      : 0.17898423
5 day(s) R2                        : 0.28468583
5 day(s) Pearson r                 : 0.54721257
5 day(s) QLIKE                     : 0.40472232
10 day(s) MAE                      : 0.09469696
10 day(s) RMSE                     : 0.18440835
10 day(s) R2                       : 0.23342555
10 day(s) Pearson r                : 0.50536975
10 day(s) QLIKE                    : 0.42423480
full horizon MAE                   : 0.09469696
full horizon RMSE                  : 0.18440835
full horizon R2                    : 0.23342555
full horizon Pearson r             : 0.50536975
full horizon QLIKE                 : 0.42423480

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/Simple_KAN_H10.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00389452, max=1.1875

=== EURUSD | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755
Epoch 1: Train mse = 0.9342 | Val mse = 0.9623
Epoch 2: Train mse = 0.7896 | Val mse = 0.7242
Epoch 3: Train mse = 0.6343 | Val mse = 0.6767
Epoch 4: Train mse = 0.5217 | Val mse = 0.5373
Epoch 5: Train mse = 0.3928 | Val mse = 0.3925
Epoch 6: Train mse = 0.3340 | Val mse = 0.3764
Epoch 7: Train mse = 0.3021 | Val mse = 0.3731
Epoch 8: Train mse = 0.2841 | Val mse = 0.3346
Epoch 9: Train mse = 0.2695 | Val mse = 0.3404
Epoch 10: Train mse = 0.2575 | Val mse = 0.3406
Epoch 11: Train mse = 0.2519 | Val mse = 0.3225
Epoch 12: Train mse = 0.2409 | Val mse = 0.3390
Epoch 13: Train mse = 0.2347 | Val mse = 0.3422
Epoch 14: Train mse = 0.2285 | Val mse = 0.3377
Epoch 15: Train mse = 0.2205 | Val mse = 0.3523
Epoch 16: Train mse = 0.2127 | Val mse = 0.3539
Epoch 17: Train mse = 0.2055 | Val mse = 0.3581
Epoch 18: Train mse = 0.1981 | Val mse = 0.3716
Epoch 19: Train mse = 0.1956 | Val mse = 0.3619
Epoch 20: Train mse = 0.1877 | Val mse = 0.3722
Epoch 21: Train mse = 0.1859 | Val mse = 0.3583
Epoch 22: Train mse = 0.1820 | Val mse = 0.3789
Epoch 23: Train mse = 0.1758 | Val mse = 0.3608
Epoch 24: Train mse = 0.1723 | Val mse = 0.3715
Epoch 25: Train mse = 0.1665 | Val mse = 0.3618
Epoch 26: Train mse = 0.1609 | Val mse = 0.3760
Epoch 27: Train mse = 0.1563 | Val mse = 0.3737
Epoch 28: Train mse = 0.1514 | Val mse = 0.3742
Epoch 29: Train mse = 0.1472 | Val mse = 0.3741
Epoch 30: Train mse = 0.1443 | Val mse = 0.3910
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08938343
1 day(s) RMSE                      : 0.17470632
1 day(s) R2                        : 0.33128730
1 day(s) Pearson r                 : 0.58734444
1 day(s) QLIKE                     : 0.46710309
3 day(s) MAE                       : 0.09216679
3 day(s) RMSE                      : 0.17772054
3 day(s) R2                        : 0.30267502
3 day(s) Pearson r                 : 0.56072037
3 day(s) QLIKE                     : 0.44199299
5 day(s) MAE                       : 0.09358763
5 day(s) RMSE                      : 0.18024687
5 day(s) R2                        : 0.27455790
5 day(s) Pearson r                 : 0.53969217
5 day(s) QLIKE                     : 0.43391566
10 day(s) MAE                      : 0.09692819
10 day(s) RMSE                     : 0.18635014
10 day(s) R2                       : 0.21719668
10 day(s) Pearson r                : 0.49693950
10 day(s) QLIKE                    : 0.45666437
20 day(s) MAE                      : 0.09988040
20 day(s) RMSE                     : 0.19000796
20 day(s) R2                       : 0.17656573
20 day(s) Pearson r                : 0.46223998
20 day(s) QLIKE                    : 0.48809595
full horizon MAE                   : 0.09988040
full horizon RMSE                  : 0.19000796
full horizon R2                    : 0.17656573
full horizon Pearson r             : 0.46223998
full horizon QLIKE                 : 0.48809595

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/Simple_KAN_H20.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00302536, max=1.01372

=== GOLD | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.28323490000094
  Min value:  -5.250298730157779
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7902797110229713
  Min value:  -2.119473437475616
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5405778347636028
  Min value:  -2.3606736150182797
Epoch 1: Train mse = 0.9003 | Val mse = 0.5067
Epoch 2: Train mse = 0.5157 | Val mse = 0.3988
Epoch 3: Train mse = 0.3481 | Val mse = 0.3219
Epoch 4: Train mse = 0.3139 | Val mse = 0.2837
Epoch 5: Train mse = 0.2627 | Val mse = 0.2630
Epoch 6: Train mse = 0.2207 | Val mse = 0.2463
Epoch 7: Train mse = 0.1951 | Val mse = 0.2418
Epoch 8: Train mse = 0.1655 | Val mse = 0.2336
Epoch 9: Train mse = 0.1417 | Val mse = 0.2298
Epoch 10: Train mse = 0.1338 | Val mse = 0.2390
Epoch 11: Train mse = 0.1215 | Val mse = 0.2550
Epoch 12: Train mse = 0.1052 | Val mse = 0.2570
Epoch 13: Train mse = 0.0946 | Val mse = 0.2691
Epoch 14: Train mse = 0.0929 | Val mse = 0.2453
Epoch 15: Train mse = 0.0841 | Val mse = 0.2566
Epoch 16: Train mse = 0.0766 | Val mse = 0.2505
Epoch 17: Train mse = 0.0725 | Val mse = 0.2647
Epoch 18: Train mse = 0.0684 | Val mse = 0.2845
Epoch 19: Train mse = 0.0663 | Val mse = 0.2710
Epoch 20: Train mse = 0.0605 | Val mse = 0.2708
Epoch 21: Train mse = 0.0547 | Val mse = 0.2763
Epoch 22: Train mse = 0.0491 | Val mse = 0.2658
Epoch 23: Train mse = 0.0447 | Val mse = 0.2856
Epoch 24: Train mse = 0.0427 | Val mse = 0.2816
Epoch 25: Train mse = 0.0426 | Val mse = 0.2789
Epoch 26: Train mse = 0.0385 | Val mse = 0.2961
Epoch 27: Train mse = 0.0334 | Val mse = 0.2860
Epoch 28: Train mse = 0.0280 | Val mse = 0.2848
Epoch 29: Train mse = 0.0294 | Val mse = 0.2751
Epoch 30: Train mse = 0.0263 | Val mse = 0.2811
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.28862430
1 day(s) RMSE                      : 0.54422790
1 day(s) R2                        : 0.31029859
1 day(s) Pearson r                 : 0.59421240
1 day(s) QLIKE                     : 0.54432552
full horizon MAE                   : 0.28862430
full horizon RMSE                  : 0.54422790
full horizon R2                    : 0.31029859
full horizon Pearson r             : 0.59421240
full horizon QLIKE                 : 0.54432552

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/Simple_KAN_H1.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00269326, max=3.6102

=== GOLD | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.283677745863046
  Min value:  -5.250623802304629
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7906723083316343
  Min value:  -2.1194793742414912
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5409449791338357
  Min value:  -2.3607041381163065
Epoch 1: Train mse = 0.9183 | Val mse = 0.5057
Epoch 2: Train mse = 0.6644 | Val mse = 0.4680
Epoch 3: Train mse = 0.4690 | Val mse = 0.3810
Epoch 4: Train mse = 0.3578 | Val mse = 0.3274
Epoch 5: Train mse = 0.3037 | Val mse = 0.3009
Epoch 6: Train mse = 0.2609 | Val mse = 0.2754
Epoch 7: Train mse = 0.2235 | Val mse = 0.2622
Epoch 8: Train mse = 0.1986 | Val mse = 0.2449
Epoch 9: Train mse = 0.1740 | Val mse = 0.2406
Epoch 10: Train mse = 0.1587 | Val mse = 0.2373
Epoch 11: Train mse = 0.1468 | Val mse = 0.2426
Epoch 12: Train mse = 0.1366 | Val mse = 0.2438
Epoch 13: Train mse = 0.1281 | Val mse = 0.2502
Epoch 14: Train mse = 0.1202 | Val mse = 0.2451
Epoch 15: Train mse = 0.1167 | Val mse = 0.2429
Epoch 16: Train mse = 0.1066 | Val mse = 0.2535
Epoch 17: Train mse = 0.1052 | Val mse = 0.2522
Epoch 18: Train mse = 0.1017 | Val mse = 0.2533
Epoch 19: Train mse = 0.0947 | Val mse = 0.2494
Epoch 20: Train mse = 0.0881 | Val mse = 0.2554
Epoch 21: Train mse = 0.0834 | Val mse = 0.2649
Epoch 22: Train mse = 0.0792 | Val mse = 0.2652
Epoch 23: Train mse = 0.0738 | Val mse = 0.2642
Epoch 24: Train mse = 0.0701 | Val mse = 0.2608
Epoch 25: Train mse = 0.0672 | Val mse = 0.2765
Epoch 26: Train mse = 0.0642 | Val mse = 0.2732
Epoch 27: Train mse = 0.0603 | Val mse = 0.2734
Epoch 28: Train mse = 0.0571 | Val mse = 0.2774
Epoch 29: Train mse = 0.0557 | Val mse = 0.2753
Epoch 30: Train mse = 0.0537 | Val mse = 0.2841
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.29802935
1 day(s) RMSE                      : 0.54727011
1 day(s) R2                        : 0.30256622
1 day(s) Pearson r                 : 0.55357295
1 day(s) QLIKE                     : 0.40674870
3 day(s) MAE                       : 0.30071062
3 day(s) RMSE                      : 0.56464566
3 day(s) R2                        : 0.25819679
3 day(s) Pearson r                 : 0.51637904
3 day(s) QLIKE                     : 0.42083903
5 day(s) MAE                       : 0.30684668
5 day(s) RMSE                      : 0.57142210
5 day(s) R2                        : 0.24244625
5 day(s) Pearson r                 : 0.50406822
5 day(s) QLIKE                     : 0.44368664
full horizon MAE                   : 0.30684668
full horizon RMSE                  : 0.57142210
full horizon R2                    : 0.24244625
full horizon Pearson r             : 0.50406822
full horizon QLIKE                 : 0.44368664

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/Simple_KAN_H5.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00508248, max=3.43164

=== GOLD | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2851543622719985
  Min value:  -5.254244390683224
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7918153902171272
  Min value:  -2.1209816396053798
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5419191122017333
  Min value:  -2.3623696000304566
Epoch 1: Train mse = 0.9430 | Val mse = 0.4915
Epoch 2: Train mse = 0.7320 | Val mse = 0.4995
Epoch 3: Train mse = 0.5617 | Val mse = 0.4144
Epoch 4: Train mse = 0.4362 | Val mse = 0.3696
Epoch 5: Train mse = 0.3519 | Val mse = 0.3385
Epoch 6: Train mse = 0.2948 | Val mse = 0.2905
Epoch 7: Train mse = 0.2489 | Val mse = 0.2730
Epoch 8: Train mse = 0.2193 | Val mse = 0.2574
Epoch 9: Train mse = 0.1941 | Val mse = 0.2487
Epoch 10: Train mse = 0.1821 | Val mse = 0.2503
Epoch 11: Train mse = 0.1682 | Val mse = 0.2505
Epoch 12: Train mse = 0.1564 | Val mse = 0.2472
Epoch 13: Train mse = 0.1485 | Val mse = 0.2533
Epoch 14: Train mse = 0.1414 | Val mse = 0.2554
Epoch 15: Train mse = 0.1352 | Val mse = 0.2591
Epoch 16: Train mse = 0.1282 | Val mse = 0.2561
Epoch 17: Train mse = 0.1233 | Val mse = 0.2606
Epoch 18: Train mse = 0.1198 | Val mse = 0.2713
Epoch 19: Train mse = 0.1150 | Val mse = 0.2715
Epoch 20: Train mse = 0.1106 | Val mse = 0.2754
Epoch 21: Train mse = 0.1059 | Val mse = 0.2792
Epoch 22: Train mse = 0.0998 | Val mse = 0.2789
Epoch 23: Train mse = 0.0936 | Val mse = 0.2796
Epoch 24: Train mse = 0.0887 | Val mse = 0.2860
Epoch 25: Train mse = 0.0854 | Val mse = 0.2958
Epoch 26: Train mse = 0.0812 | Val mse = 0.3025
Epoch 27: Train mse = 0.0764 | Val mse = 0.3002
Epoch 28: Train mse = 0.0741 | Val mse = 0.3031
Epoch 29: Train mse = 0.0713 | Val mse = 0.3111
Epoch 30: Train mse = 0.0665 | Val mse = 0.3135
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.27871042
1 day(s) RMSE                      : 0.53236059
1 day(s) R2                        : 0.34004958
1 day(s) Pearson r                 : 0.60390574
1 day(s) QLIKE                     : 0.43434120
3 day(s) MAE                       : 0.29394684
3 day(s) RMSE                      : 0.56120094
3 day(s) R2                        : 0.26722020
3 day(s) Pearson r                 : 0.54722980
3 day(s) QLIKE                     : 0.45101764
5 day(s) MAE                       : 0.30252208
5 day(s) RMSE                      : 0.57649303
5 day(s) R2                        : 0.22894118
5 day(s) Pearson r                 : 0.51819016
5 day(s) QLIKE                     : 0.44561320
10 day(s) MAE                      : 0.31602897
10 day(s) RMSE                     : 0.58808690
10 day(s) R2                       : 0.20656595
10 day(s) Pearson r                : 0.49991847
10 day(s) QLIKE                    : 0.49276984
full horizon MAE                   : 0.31602897
full horizon RMSE                  : 0.58808690
full horizon R2                    : 0.20656595
full horizon Pearson r             : 0.49991847
full horizon QLIKE                 : 0.49276984

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/Simple_KAN_H10.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00757664, max=4.8144

=== GOLD | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2870228653696634
  Min value:  -5.257736685381005
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.793333110638287
  Min value:  -2.1222460659281195
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5432591469134391
  Min value:  -2.3638056623268993
Epoch 1: Train mse = 0.9612 | Val mse = 0.4869
Epoch 2: Train mse = 0.8090 | Val mse = 0.5645
Epoch 3: Train mse = 0.6416 | Val mse = 0.4724
Epoch 4: Train mse = 0.5089 | Val mse = 0.4341
Epoch 5: Train mse = 0.4128 | Val mse = 0.4095
Epoch 6: Train mse = 0.3559 | Val mse = 0.3674
Epoch 7: Train mse = 0.3138 | Val mse = 0.3326
Epoch 8: Train mse = 0.2749 | Val mse = 0.3020
Epoch 9: Train mse = 0.2421 | Val mse = 0.3026
Epoch 10: Train mse = 0.2226 | Val mse = 0.2818
Epoch 11: Train mse = 0.2078 | Val mse = 0.2840
Epoch 12: Train mse = 0.1951 | Val mse = 0.2674
Epoch 13: Train mse = 0.1882 | Val mse = 0.2712
Epoch 14: Train mse = 0.1833 | Val mse = 0.2687
Epoch 15: Train mse = 0.1776 | Val mse = 0.2744
Epoch 16: Train mse = 0.1699 | Val mse = 0.2679
Epoch 17: Train mse = 0.1610 | Val mse = 0.2710
Epoch 18: Train mse = 0.1554 | Val mse = 0.2759
Epoch 19: Train mse = 0.1507 | Val mse = 0.2876
Epoch 20: Train mse = 0.1442 | Val mse = 0.2852
Epoch 21: Train mse = 0.1392 | Val mse = 0.2923
Epoch 22: Train mse = 0.1333 | Val mse = 0.2979
Epoch 23: Train mse = 0.1302 | Val mse = 0.3003
Epoch 24: Train mse = 0.1267 | Val mse = 0.3022
Epoch 25: Train mse = 0.1219 | Val mse = 0.3045
Epoch 26: Train mse = 0.1155 | Val mse = 0.3006
Epoch 27: Train mse = 0.1097 | Val mse = 0.3173
Epoch 28: Train mse = 0.1064 | Val mse = 0.3146
Epoch 29: Train mse = 0.1007 | Val mse = 0.3254
Epoch 30: Train mse = 0.0952 | Val mse = 0.3210
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.30308997
1 day(s) RMSE                      : 0.55048019
1 day(s) R2                        : 0.29436047
1 day(s) Pearson r                 : 0.55948752
1 day(s) QLIKE                     : 0.39813374
3 day(s) MAE                       : 0.30720722
3 day(s) RMSE                      : 0.57525939
3 day(s) R2                        : 0.23004711
3 day(s) Pearson r                 : 0.51383475
3 day(s) QLIKE                     : 0.41860763
5 day(s) MAE                       : 0.31314457
5 day(s) RMSE                      : 0.58427711
5 day(s) R2                        : 0.20797822
5 day(s) Pearson r                 : 0.49581011
5 day(s) QLIKE                     : 0.44071589
10 day(s) MAE                      : 0.32000525
10 day(s) RMSE                     : 0.58960252
10 day(s) R2                       : 0.20247101
10 day(s) Pearson r                : 0.49431692
10 day(s) QLIKE                    : 0.49043000
20 day(s) MAE                      : 0.34287990
20 day(s) RMSE                     : 0.62801041
20 day(s) R2                       : 0.10853622
20 day(s) Pearson r                : 0.41496603
20 day(s) QLIKE                    : 0.58224203
full horizon MAE                   : 0.34287990
full horizon RMSE                  : 0.62801041
full horizon R2                    : 0.10853622
full horizon Pearson r             : 0.41496603
full horizon QLIKE                 : 0.58224203

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/Simple_KAN_H20.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00448748, max=5.82162

=== SP500 | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.598103080699805
  Min value:  -18.186962127218408
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3850009093345803
  Min value:  -2.4181466917899535
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3795163284544953
  Min value:  -3.2441546953028633
Epoch 1: Train mse = 0.9028 | Val mse = 0.7569
Epoch 2: Train mse = 0.7421 | Val mse = 0.6542
Epoch 3: Train mse = 0.6373 | Val mse = 0.5390
Epoch 4: Train mse = 0.6001 | Val mse = 0.4889
Epoch 5: Train mse = 0.5443 | Val mse = 0.4986
Epoch 6: Train mse = 0.5010 | Val mse = 0.4980
Epoch 7: Train mse = 0.4769 | Val mse = 0.5330
Epoch 8: Train mse = 0.4733 | Val mse = 0.5262
Epoch 9: Train mse = 0.4407 | Val mse = 0.5143
Epoch 10: Train mse = 0.4151 | Val mse = 0.5187
Epoch 11: Train mse = 0.4077 | Val mse = 0.5298
Epoch 12: Train mse = 0.3843 | Val mse = 0.5385
Epoch 13: Train mse = 0.3805 | Val mse = 0.5749
Epoch 14: Train mse = 0.3657 | Val mse = 0.5454
Epoch 15: Train mse = 0.4134 | Val mse = 0.5752
Epoch 16: Train mse = 0.4123 | Val mse = 0.5858
Epoch 17: Train mse = 0.3465 | Val mse = 0.5700
Epoch 18: Train mse = 0.2979 | Val mse = 0.5424
Epoch 19: Train mse = 0.2660 | Val mse = 0.5721
Epoch 20: Train mse = 0.2397 | Val mse = 0.5583
Epoch 21: Train mse = 0.2381 | Val mse = 0.5652
Epoch 22: Train mse = 0.2430 | Val mse = 0.5410
Epoch 23: Train mse = 0.2359 | Val mse = 0.5277
Epoch 24: Train mse = 0.2249 | Val mse = 0.5372
Epoch 25: Train mse = 0.2129 | Val mse = 0.5387
Epoch 26: Train mse = 0.2054 | Val mse = 0.5633
Epoch 27: Train mse = 0.1962 | Val mse = 0.5768
Epoch 28: Train mse = 0.1983 | Val mse = 0.5622
Epoch 29: Train mse = 0.1929 | Val mse = 0.5863
Epoch 30: Train mse = 0.1881 | Val mse = 0.6227
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.49997044
1 day(s) RMSE                      : 2.39624140
1 day(s) R2                        : 0.28389833
1 day(s) Pearson r                 : 0.65403000
1 day(s) QLIKE                     : 0.47191816
full horizon MAE                   : 0.49997044
full horizon RMSE                  : 2.39624140
full horizon R2                    : 0.28389833
full horizon Pearson r             : 0.65403000
full horizon QLIKE                 : 0.47191816

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/Simple_KAN_H1.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0677789, max=9.42283

=== SP500 | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.596907486751884
  Min value:  -18.180099645819357
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384254029613913
  Min value:  -2.417116935778614
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3784015875360525
  Min value:  -3.2428194072829677
Epoch 1: Train mse = 0.9275 | Val mse = 0.8104
Epoch 2: Train mse = 0.7860 | Val mse = 0.8305
Epoch 3: Train mse = 0.7296 | Val mse = 0.7517
Epoch 4: Train mse = 0.7032 | Val mse = 0.7608
Epoch 5: Train mse = 0.6804 | Val mse = 0.7547
Epoch 6: Train mse = 0.6658 | Val mse = 0.7476
Epoch 7: Train mse = 0.6611 | Val mse = 0.7553
Epoch 8: Train mse = 0.6527 | Val mse = 0.7444
Epoch 9: Train mse = 0.6414 | Val mse = 0.7281
Epoch 10: Train mse = 0.6351 | Val mse = 0.7071
Epoch 11: Train mse = 0.6478 | Val mse = 0.6999
Epoch 12: Train mse = 0.6288 | Val mse = 0.6614
Epoch 13: Train mse = 0.6081 | Val mse = 0.6869
Epoch 14: Train mse = 0.5900 | Val mse = 0.6212
Epoch 15: Train mse = 0.5756 | Val mse = 0.6174
Epoch 16: Train mse = 0.5444 | Val mse = 0.6253
Epoch 17: Train mse = 0.5201 | Val mse = 0.6052
Epoch 18: Train mse = 0.4971 | Val mse = 0.5834
Epoch 19: Train mse = 0.4806 | Val mse = 0.5831
Epoch 20: Train mse = 0.4619 | Val mse = 0.6205
Epoch 21: Train mse = 0.4375 | Val mse = 0.6027
Epoch 22: Train mse = 0.4350 | Val mse = 0.5769
Epoch 23: Train mse = 0.4211 | Val mse = 0.6526
Epoch 24: Train mse = 0.4002 | Val mse = 0.6037
Epoch 25: Train mse = 0.3838 | Val mse = 0.6065
Epoch 26: Train mse = 0.3653 | Val mse = 0.5997
Epoch 27: Train mse = 0.3476 | Val mse = 0.6191
Epoch 28: Train mse = 0.3336 | Val mse = 0.6450
Epoch 29: Train mse = 0.3235 | Val mse = 0.6324
Epoch 30: Train mse = 0.3149 | Val mse = 0.6589
Epoch 31: Train mse = 0.2883 | Val mse = 0.6607
Epoch 32: Train mse = 0.2779 | Val mse = 0.6588
Early stopping triggered at epoch 32.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.59308450
1 day(s) RMSE                      : 2.54715518
1 day(s) R2                        : 0.19085871
1 day(s) Pearson r                 : 0.54198579
1 day(s) QLIKE                     : 0.62028127
3 day(s) MAE                       : 0.70351322
3 day(s) RMSE                      : 3.19267388
3 day(s) R2                        : -0.27116428
3 day(s) Pearson r                 : 0.38393386
3 day(s) QLIKE                     : 0.69833720
5 day(s) MAE                       : 0.79043202
5 day(s) RMSE                      : 3.74713413
5 day(s) R2                        : -0.75091943
5 day(s) Pearson r                 : 0.25139463
5 day(s) QLIKE                     : 0.73104298
full horizon MAE                   : 0.79043202
full horizon RMSE                  : 3.74713413
full horizon R2                    : -0.75091943
full horizon Pearson r             : 0.25139463
full horizon QLIKE                 : 0.73104298

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/Simple_KAN_H5.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.00246211, max=54.9897

=== SP500 | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.596947126941468
  Min value:  -18.18335453470548
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384110213828625
  Min value:  -2.417987125769155
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.378408171115341
  Min value:  -3.2438145134653293
Epoch 1: Train mse = 0.9644 | Val mse = 0.8654
Epoch 2: Train mse = 0.8324 | Val mse = 0.8471
Epoch 3: Train mse = 0.7910 | Val mse = 0.7863
Epoch 4: Train mse = 0.7592 | Val mse = 0.7752
Epoch 5: Train mse = 0.7406 | Val mse = 0.7864
Epoch 6: Train mse = 0.7287 | Val mse = 0.7928
Epoch 7: Train mse = 0.7231 | Val mse = 0.7784
Epoch 8: Train mse = 0.7132 | Val mse = 0.7812
Epoch 9: Train mse = 0.7013 | Val mse = 0.7879
Epoch 10: Train mse = 0.7033 | Val mse = 0.7859
Epoch 11: Train mse = 0.7029 | Val mse = 0.7777
Epoch 12: Train mse = 0.6865 | Val mse = 0.7323
Epoch 13: Train mse = 0.6692 | Val mse = 0.7267
Epoch 14: Train mse = 0.6533 | Val mse = 0.7102
Epoch 15: Train mse = 0.6331 | Val mse = 0.7083
Epoch 16: Train mse = 0.6132 | Val mse = 0.6556
Epoch 17: Train mse = 0.6075 | Val mse = 0.6600
Epoch 18: Train mse = 0.5807 | Val mse = 0.6430
Epoch 19: Train mse = 0.5612 | Val mse = 0.6377
Epoch 20: Train mse = 0.5479 | Val mse = 0.6887
Epoch 21: Train mse = 0.5225 | Val mse = 0.6564
Epoch 22: Train mse = 0.5055 | Val mse = 0.6540
Epoch 23: Train mse = 0.4834 | Val mse = 0.6244
Epoch 24: Train mse = 0.4704 | Val mse = 0.6312
Epoch 25: Train mse = 0.4568 | Val mse = 0.6734
Epoch 26: Train mse = 0.4495 | Val mse = 0.7020
Epoch 27: Train mse = 0.4324 | Val mse = 0.6269
Epoch 28: Train mse = 0.4251 | Val mse = 0.6670
Epoch 29: Train mse = 0.4114 | Val mse = 0.6535
Epoch 30: Train mse = 0.3968 | Val mse = 0.6588
Epoch 31: Train mse = 0.4028 | Val mse = 0.6520
Epoch 32: Train mse = 0.3854 | Val mse = 0.6864
Epoch 33: Train mse = 0.3669 | Val mse = 0.6805
Early stopping triggered at epoch 33.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.59881581
1 day(s) RMSE                      : 2.55019538
1 day(s) R2                        : 0.18892603
1 day(s) Pearson r                 : 0.53417732
1 day(s) QLIKE                     : 0.51713623
3 day(s) MAE                       : 0.69887411
3 day(s) RMSE                      : 3.16712780
3 day(s) R2                        : -0.25090330
3 day(s) Pearson r                 : 0.35630985
3 day(s) QLIKE                     : 0.58972365
5 day(s) MAE                       : 0.79071023
5 day(s) RMSE                      : 3.54935411
5 day(s) R2                        : -0.57096440
5 day(s) Pearson r                 : 0.24718662
5 day(s) QLIKE                     : 0.68991371
10 day(s) MAE                      : 0.85558021
10 day(s) RMSE                     : 3.64931849
10 day(s) R2                       : -0.66047526
10 day(s) Pearson r                : 0.15839601
10 day(s) QLIKE                    : 0.88979748
full horizon MAE                   : 0.85558021
full horizon RMSE                  : 3.64931849
full horizon R2                    : -0.66047526
full horizon Pearson r             : 0.15839601
full horizon QLIKE                 : 0.88979748

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/Simple_KAN_H10.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.00811509, max=49.7571

=== SP500 | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5963112311058607
  Min value:  -18.187442810157716
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3832820721048678
  Min value:  -2.446834413184202
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.377737634830384
  Min value:  -3.2455347334033506
Epoch 1: Train mse = 0.9694 | Val mse = 0.9072
Epoch 2: Train mse = 0.8552 | Val mse = 0.9263
Epoch 3: Train mse = 0.8230 | Val mse = 0.8346
Epoch 4: Train mse = 0.8026 | Val mse = 0.8243
Epoch 5: Train mse = 0.7895 | Val mse = 0.8448
Epoch 6: Train mse = 0.7813 | Val mse = 0.8299
Epoch 7: Train mse = 0.7773 | Val mse = 0.8334
Epoch 8: Train mse = 0.7694 | Val mse = 0.8360
Epoch 9: Train mse = 0.7635 | Val mse = 0.8568
Epoch 10: Train mse = 0.7668 | Val mse = 0.8179
Epoch 11: Train mse = 0.7515 | Val mse = 0.8446
Epoch 12: Train mse = 0.7401 | Val mse = 0.7498
Epoch 13: Train mse = 0.7210 | Val mse = 0.7302
Epoch 14: Train mse = 0.7018 | Val mse = 0.7275
Epoch 15: Train mse = 0.6797 | Val mse = 0.7290
Epoch 16: Train mse = 0.6672 | Val mse = 0.6941
Epoch 17: Train mse = 0.6546 | Val mse = 0.6990
Epoch 18: Train mse = 0.6259 | Val mse = 0.6984
Epoch 19: Train mse = 0.6014 | Val mse = 0.8137
Epoch 20: Train mse = 0.5918 | Val mse = 0.7111
Epoch 21: Train mse = 0.5718 | Val mse = 0.6760
Epoch 22: Train mse = 0.5657 | Val mse = 0.7061
Epoch 23: Train mse = 0.5478 | Val mse = 0.7094
Epoch 24: Train mse = 0.5286 | Val mse = 0.7596
Epoch 25: Train mse = 0.5159 | Val mse = 0.7015
Epoch 26: Train mse = 0.5029 | Val mse = 0.6649
Epoch 27: Train mse = 0.4890 | Val mse = 0.7361
Epoch 28: Train mse = 0.4748 | Val mse = 0.6893
Epoch 29: Train mse = 0.4643 | Val mse = 0.7022
Epoch 30: Train mse = 0.4655 | Val mse = 0.6840
Epoch 31: Train mse = 0.4507 | Val mse = 0.7071
Epoch 32: Train mse = 0.4484 | Val mse = 0.7751
Epoch 33: Train mse = 0.4335 | Val mse = 0.7130
Epoch 34: Train mse = 0.4331 | Val mse = 0.7393
Epoch 35: Train mse = 0.4259 | Val mse = 0.7036
Epoch 36: Train mse = 0.4153 | Val mse = 0.7646
Early stopping triggered at epoch 36.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.61014199
1 day(s) RMSE                      : 2.60891120
1 day(s) R2                        : 0.15114766
1 day(s) Pearson r                 : 0.48627095
1 day(s) QLIKE                     : 0.57154295
3 day(s) MAE                       : 0.72311987
3 day(s) RMSE                      : 3.35081269
3 day(s) R2                        : -0.40020900
3 day(s) Pearson r                 : 0.34887806
3 day(s) QLIKE                     : 0.63669379
5 day(s) MAE                       : 0.78506448
5 day(s) RMSE                      : 3.62777461
5 day(s) R2                        : -0.64115001
5 day(s) Pearson r                 : 0.24777164
5 day(s) QLIKE                     : 0.70695204
10 day(s) MAE                      : 0.88173555
10 day(s) RMSE                     : 3.87672806
10 day(s) R2                       : -0.87387039
10 day(s) Pearson r                : 0.15186299
10 day(s) QLIKE                    : 0.84807828
20 day(s) MAE                      : 0.90759089
20 day(s) RMSE                     : 3.75387333
20 day(s) R2                       : -0.75677676
20 day(s) Pearson r                : 0.10791116
20 day(s) QLIKE                    : 0.87219332
full horizon MAE                   : 0.90759089
full horizon RMSE                  : 3.75387333
full horizon R2                    : -0.75677676
full horizon Pearson r             : 0.10791116
full horizon QLIKE                 : 0.87219332

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/Simple_KAN_H20.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.00822392, max=86.9713
In [ ]:
# Load the horizon-10 structured data dictionary from disk and unpack the
# per-ticker arrays into three parallel maps: price windows (X_other),
# calendar/time features (X_time), and targets (y), validating dims/sizes.
import os, pickle
from pathlib import Path
import numpy as np


load_path = Path(root_folder) / objects_relative_path / "structured_data_dict_10.pkl"
if not load_path.exists():
    raise FileNotFoundError(f"Pickle not found at: {load_path}")

# NOTE(review): pickle.load runs arbitrary code on untrusted files; acceptable
# here because this pickle is produced by an earlier cell of this project.
with load_path.open("rb") as f:
    structured_data_dict_10 = pickle.load(f)

print("Data dictionary 10 loaded successfully.")

tickers = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]

X10_price_map, X10_time_map, y10_map = {}, {}, {}
missing = []

for t in tickers:
    if t not in structured_data_dict_10:
        missing.append(t)
        continue

    rec = structured_data_dict_10[t]
    x_price = rec.get("X_other")   # (n_samples, lookback, n_price_feats) expected
    x_time  = rec.get("X_time")    # optional; may legitimately be absent
    y_arr   = rec.get("y")         # (n_samples, horizon, n_targets) expected

    # Price windows and targets are mandatory; report exactly which is absent.
    if x_price is None or y_arr is None:
        print(f"[WARN] {t}: missing"
              f"{' X_other' if x_price is None else ''}"
              f"{' and y' if (x_price is None and y_arr is None) else (' y' if y_arr is None else '')} - skipping.")
        continue

    # Coerce to float ndarrays; a failure here means malformed pickle content.
    try:
        x_price = np.asarray(x_price, dtype=float)
        y_arr   = np.asarray(y_arr,   dtype=float)
        x_time  = np.asarray(x_time, dtype=float) if x_time is not None else None
    except Exception as exc:
        print(f"[WARN] {t}: array conversion failed: {exc} - skipping.")
        continue

    # Hard requirements: both mandatory arrays are 3-D and sample-aligned.
    if x_price.ndim != 3 or y_arr.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_other={x_price.shape}, y={y_arr.shape} - skipping.")
        continue
    if x_price.shape[0] != y_arr.shape[0]:
        print(f"[WARN] {t}: X_other and y n_samples mismatch: {x_price.shape[0]} vs {y_arr.shape[0]} - skipping.")
        continue

    # Time features are best-effort: drop them (don't skip the ticker) when bad.
    if x_time is not None:
        if x_time.ndim != 3:
            print(f"[WARN] {t}: unexpected dims X_time={x_time.shape} - setting to None.")
            x_time = None
        elif x_time.shape[0] != x_price.shape[0]:
            print(f"[WARN] {t}: X_time n_samples mismatch: {x_time.shape[0]} vs {x_price.shape[0]} - setting to None.")
            x_time = None

    X10_price_map[t] = x_price
    X10_time_map[t]  = x_time
    y10_map[t]       = y_arr

if missing:
    print(f"[INFO] Missing tickers in data dict (skipped): {missing}")
print(f"[INFO] Loaded tickers: {list(X10_price_map.keys())}")


# Sanity check: one known ticker should have unpacked with a 3-D target shape.
y = y10_map.get("AAPL")
print("AAPL y shape:", None if y is None else y.shape)
Data dictionary 10 loaded successfully.
[INFO] Loaded tickers: ['AAPL', 'MSFT', 'GE', 'BAC', 'C', 'BTCUSDT', 'EURUSD', 'GOLD', 'SP500']
AAPL y shape: (2018, 60, 2)
In [151]:
# Final evaluation run: fit ITransformer for every ticker/horizon pair using
# the fixed (pre-tuned) hyper-parameters — single fit, no grid search, no CV.
final_tickers  = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
final_models   = ["ITransformer"]
final_horizons = [1, 5, 10, 20]

# Models listed here are run in multi-task mode (the run logs show no_tasks=2).
MULTI_TASK_MODELS = {"ITransformer"}

# Collect the call arguments in one place so the configuration reads as data.
run_kwargs = dict(
    tickers=final_tickers,
    horizons=final_horizons,
    model_list=final_models,
    X_price_map=X10_price_map,
    X_time_map=X10_time_map,
    y_map=y10_map,
    base_save_dir=FINAL_BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS_MERGED,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=None,                    # fixed params only — no hyper-parameter search
    results_store=final_results_store,
    overwrite=True,                      # re-run even if saved results already exist
)

_ = run_all_models_for_all(**run_kwargs)
=== AAPL | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -33.2571324878827
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.133266415235102
  Min value:  -2.354189421011418
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.093969981528533
  Min value:  -1.2108152104959184
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9069843280619727
  Min value:  -1.6295009124362907
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5044175194514073
  Min value:  -2.3858091292041257
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8950185841626324
  Min value:  -2.1199314093218837
Epoch 1/50  Train 28.9482  Val 0.7880  0.6s
Epoch 2/50  Train 28.4201  Val 0.7867  0.2s
Epoch 3/50  Train 27.9709  Val 0.7854  0.2s
Epoch 4/50  Train 27.5625  Val 0.7842  0.2s
Epoch 5/50  Train 27.1872  Val 0.7832  0.2s
Epoch 6/50  Train 26.8406  Val 0.7821  0.2s
Epoch 7/50  Train 26.5196  Val 0.7812  0.2s
Epoch 8/50  Train 26.2216  Val 0.7803  0.2s
Epoch 9/50  Train 25.9447  Val 0.7795  0.2s
Epoch 10/50  Train 25.6870  Val 0.7788  0.2s
Epoch 11/50  Train 25.4469  Val 0.7781  0.2s
Epoch 12/50  Train 25.2229  Val 0.7775  0.2s
Epoch 13/50  Train 25.0138  Val 0.7769  0.2s
Epoch 14/50  Train 24.8183  Val 0.7764  0.2s
Epoch 15/50  Train 24.6353  Val 0.7759  0.2s
Epoch 16/50  Train 24.4639  Val 0.7754  0.2s
Epoch 17/50  Train 24.3031  Val 0.7750  0.2s
Epoch 18/50  Train 24.1523  Val 0.7746  0.2s
Epoch 19/50  Train 24.0105  Val 0.7742  0.2s
Epoch 20/50  Train 23.8773  Val 0.7739  0.2s
Epoch 21/50  Train 23.7518  Val 0.7736  0.2s
Epoch 22/50  Train 23.6337  Val 0.7733  0.2s
Epoch 23/50  Train 23.5223  Val 0.7730  0.2s
Epoch 24/50  Train 23.4172  Val 0.7728  0.2s
Epoch 25/50  Train 23.3179  Val 0.7725  0.2s
Epoch 26/50  Train 23.2240  Val 0.7723  0.2s
Epoch 27/50  Train 23.1353  Val 0.7721  0.2s
Epoch 28/50  Train 23.0512  Val 0.7720  0.2s
Epoch 29/50  Train 22.9715  Val 0.7718  0.3s
Epoch 30/50  Train 22.8960  Val 0.7716  0.2s
Epoch 31/50  Train 22.8243  Val 0.7715  0.2s
Epoch 32/50  Train 22.7561  Val 0.7714  0.2s
Epoch 33/50  Train 22.6914  Val 0.7713  0.2s
Epoch 34/50  Train 22.6297  Val 0.7712  0.2s
Epoch 35/50  Train 22.5710  Val 0.7711  0.2s
Epoch 36/50  Train 22.5151  Val 0.7710  0.2s
Epoch 37/50  Train 22.4617  Val 0.7709  0.2s
Epoch 38/50  Train 22.4108  Val 0.7708  0.2s
Epoch 39/50  Train 22.3622  Val 0.7707  0.2s
Epoch 40/50  Train 22.3156  Val 0.7707  0.2s
Epoch 41/50  Train 22.2711  Val 0.7706  0.2s
Epoch 42/50  Train 22.2285  Val 0.7706  0.2s
Epoch 43/50  Train 22.1876  Val 0.7705  0.2s
Epoch 44/50  Train 22.1484  Val 0.7705  0.2s
Epoch 45/50  Train 22.1108  Val 0.7705  0.2s
Epoch 46/50  Train 22.0747  Val 0.7704  0.2s
Epoch 47/50  Train 22.0399  Val 0.7704  0.2s
Epoch 48/50  Train 22.0065  Val 0.7704  0.2s
Epoch 49/50  Train 21.9743  Val 0.7704  0.2s
Epoch 50/50  Train 21.9433  Val 0.7703  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.44080942
1 day(s) RMSE                      : 8.71817512
1 day(s) R2                        : -0.01756961
1 day(s) Pearson r                 : 0.23056800
1 day(s) QLIKE                     : 0.53330540
full horizon MAE                   : 2.44080942
full horizon RMSE                  : 8.71817512
full horizon R2                    : -0.01756961
full horizon Pearson r             : 0.23056800
full horizon QLIKE                 : 0.53330540

--- Task 2 ---
1 day(s) MAE                       : 0.58801941
1 day(s) RMSE                      : 1.24304818
1 day(s) R2                        : -0.28827194
1 day(s) Pearson r                 : 0.00701819
1 day(s) QLIKE                     : 13.90310447
full horizon MAE                   : 0.58801941
full horizon RMSE                  : 1.24304818
full horizon R2                    : -0.28827194
full horizon Pearson r             : 0.00701819
full horizon QLIKE                 : 13.90310447

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/ITransformer_H1.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=2.40867, max=2.4793

=== AAPL | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -33.2571324878827
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.140231129276028
  Min value:  -2.3583145800692082
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.093969981528533
  Min value:  -1.2108152104959184
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9079382526637185
  Min value:  -1.632926464574084
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5044175194514073
  Min value:  -2.3858091292041257
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8969263469922275
  Min value:  -2.123830418076332
Epoch 1/50  Train 29.5854  Val 0.8073  0.5s
Epoch 2/50  Train 29.2640  Val 0.8061  0.2s
Epoch 3/50  Train 28.9742  Val 0.8050  0.2s
Epoch 4/50  Train 28.7001  Val 0.8040  0.2s
Epoch 5/50  Train 28.4402  Val 0.8030  0.2s
Epoch 6/50  Train 28.1934  Val 0.8020  0.2s
Epoch 7/50  Train 27.9586  Val 0.8011  0.2s
Epoch 8/50  Train 27.7347  Val 0.8002  0.2s
Epoch 9/50  Train 27.5210  Val 0.7994  0.2s
Epoch 10/50  Train 27.3166  Val 0.7985  0.2s
Epoch 11/50  Train 27.1210  Val 0.7977  0.2s
Epoch 12/50  Train 26.9335  Val 0.7970  0.2s
Epoch 13/50  Train 26.7537  Val 0.7962  0.2s
Epoch 14/50  Train 26.5811  Val 0.7955  0.2s
Epoch 15/50  Train 26.4154  Val 0.7948  0.2s
Epoch 16/50  Train 26.2561  Val 0.7941  0.2s
Epoch 17/50  Train 26.1031  Val 0.7935  0.2s
Epoch 18/50  Train 25.9560  Val 0.7929  0.2s
Epoch 19/50  Train 25.8145  Val 0.7923  0.2s
Epoch 20/50  Train 25.6784  Val 0.7917  0.2s
Epoch 21/50  Train 25.5474  Val 0.7911  0.2s
Epoch 22/50  Train 25.4214  Val 0.7906  0.2s
Epoch 23/50  Train 25.3000  Val 0.7900  0.2s
Epoch 24/50  Train 25.1832  Val 0.7895  0.2s
Epoch 25/50  Train 25.0707  Val 0.7890  0.2s
Epoch 26/50  Train 24.9624  Val 0.7885  0.2s
Epoch 27/50  Train 24.8581  Val 0.7881  0.2s
Epoch 28/50  Train 24.7576  Val 0.7876  0.3s
Epoch 29/50  Train 24.6608  Val 0.7872  0.2s
Epoch 30/50  Train 24.5674  Val 0.7868  0.2s
Epoch 31/50  Train 24.4775  Val 0.7864  0.3s
Epoch 32/50  Train 24.3908  Val 0.7860  0.2s
Epoch 33/50  Train 24.3072  Val 0.7856  0.2s
Epoch 34/50  Train 24.2266  Val 0.7852  0.2s
Epoch 35/50  Train 24.1489  Val 0.7848  0.2s
Epoch 36/50  Train 24.0739  Val 0.7845  0.2s
Epoch 37/50  Train 24.0016  Val 0.7842  0.2s
Epoch 38/50  Train 23.9318  Val 0.7838  0.2s
Epoch 39/50  Train 23.8645  Val 0.7835  0.2s
Epoch 40/50  Train 23.7995  Val 0.7832  0.2s
Epoch 41/50  Train 23.7368  Val 0.7829  0.4s
Epoch 42/50  Train 23.6762  Val 0.7826  0.3s
Epoch 43/50  Train 23.6177  Val 0.7823  0.3s
Epoch 44/50  Train 23.5611  Val 0.7821  0.3s
Epoch 45/50  Train 23.5065  Val 0.7818  0.3s
Epoch 46/50  Train 23.4538  Val 0.7816  0.3s
Epoch 47/50  Train 23.4027  Val 0.7813  0.3s
Epoch 48/50  Train 23.3534  Val 0.7811  0.3s
Epoch 49/50  Train 23.3057  Val 0.7808  0.3s
Epoch 50/50  Train 23.2596  Val 0.7806  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.44210174
1 day(s) RMSE                      : 8.71798493
1 day(s) R2                        : -0.01752521
1 day(s) Pearson r                 : 0.18403635
1 day(s) QLIKE                     : 0.53368233
3 day(s) MAE                       : 2.52920014
3 day(s) RMSE                      : 8.97471969
3 day(s) R2                        : -0.01920720
3 day(s) Pearson r                 : 0.25052337
3 day(s) QLIKE                     : 0.55066899
5 day(s) MAE                       : 2.55297706
5 day(s) RMSE                      : 9.02791015
5 day(s) R2                        : -0.01994404
5 day(s) Pearson r                 : 0.19791944
5 day(s) QLIKE                     : 0.55502788
full horizon MAE                   : 2.55297706
full horizon RMSE                  : 9.02791015
full horizon R2                    : -0.01994404
full horizon Pearson r             : 0.19791944
full horizon QLIKE                 : 0.55502788

--- Task 2 ---
1 day(s) MAE                       : 0.58801728
1 day(s) RMSE                      : 1.24304568
1 day(s) R2                        : -0.28826676
1 day(s) Pearson r                 : 0.01739471
1 day(s) QLIKE                     : 14.35501510
3 day(s) MAE                       : 0.59012472
3 day(s) RMSE                      : 1.24572638
3 day(s) R2                        : -0.28932746
3 day(s) Pearson r                 : 0.02783502
3 day(s) QLIKE                     : 13.83295252
5 day(s) MAE                       : 0.59116921
5 day(s) RMSE                      : 1.24664024
5 day(s) R2                        : -0.29010270
5 day(s) Pearson r                 : 0.00347599
5 day(s) QLIKE                     : 13.72215779
full horizon MAE                   : 0.59116921
full horizon RMSE                  : 1.24664024
full horizon R2                    : -0.29010270
full horizon Pearson r             : 0.00347599
full horizon QLIKE                 : 13.72215779

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/ITransformer_H5.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=2.39799, max=2.54057

=== AAPL | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -33.2571324878827
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.149858145997081
  Min value:  -2.3634631558377763
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.093969981528533
  Min value:  -1.2108152104959184
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.909556794039411
  Min value:  -1.6371429190585962
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5044175194514073
  Min value:  -2.3858091292041257
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8998157347365185
  Min value:  -2.1286776824868854
Epoch 1/50  Train 29.9010  Val 0.8068  0.6s
Epoch 2/50  Train 29.6512  Val 0.8060  0.2s
Epoch 3/50  Train 29.4229  Val 0.8052  0.2s
Epoch 4/50  Train 29.2040  Val 0.8045  0.3s
Epoch 5/50  Train 28.9939  Val 0.8038  0.2s
Epoch 6/50  Train 28.7920  Val 0.8031  0.2s
Epoch 7/50  Train 28.5978  Val 0.8024  0.2s
Epoch 8/50  Train 28.4107  Val 0.8018  0.2s
Epoch 9/50  Train 28.2302  Val 0.8012  0.2s
Epoch 10/50  Train 28.0558  Val 0.8006  0.2s
Epoch 11/50  Train 27.8873  Val 0.8000  0.2s
Epoch 12/50  Train 27.7242  Val 0.7994  0.2s
Epoch 13/50  Train 27.5662  Val 0.7988  0.2s
Epoch 14/50  Train 27.4131  Val 0.7983  0.2s
Epoch 15/50  Train 27.2646  Val 0.7978  0.2s
Epoch 16/50  Train 27.1205  Val 0.7973  0.2s
Epoch 17/50  Train 26.9806  Val 0.7968  0.2s
Epoch 18/50  Train 26.8447  Val 0.7963  0.2s
Epoch 19/50  Train 26.7126  Val 0.7958  0.2s
Epoch 20/50  Train 26.5843  Val 0.7953  0.2s
Epoch 21/50  Train 26.4594  Val 0.7948  0.2s
Epoch 22/50  Train 26.3380  Val 0.7944  0.2s
Epoch 23/50  Train 26.2198  Val 0.7940  0.2s
Epoch 24/50  Train 26.1049  Val 0.7935  0.2s
Epoch 25/50  Train 25.9930  Val 0.7931  0.3s
Epoch 26/50  Train 25.8841  Val 0.7927  0.2s
Epoch 27/50  Train 25.7780  Val 0.7923  0.3s
Epoch 28/50  Train 25.6748  Val 0.7919  0.2s
Epoch 29/50  Train 25.5743  Val 0.7915  0.3s
Epoch 30/50  Train 25.4765  Val 0.7912  0.3s
Epoch 31/50  Train 25.3813  Val 0.7908  0.3s
Epoch 32/50  Train 25.2886  Val 0.7904  0.3s
Epoch 33/50  Train 25.1983  Val 0.7901  0.3s
Epoch 34/50  Train 25.1104  Val 0.7898  0.3s
Epoch 35/50  Train 25.0249  Val 0.7894  0.3s
Epoch 36/50  Train 24.9417  Val 0.7891  0.3s
Epoch 37/50  Train 24.8607  Val 0.7888  0.3s
Epoch 38/50  Train 24.7819  Val 0.7885  0.3s
Epoch 39/50  Train 24.7052  Val 0.7882  0.2s
Epoch 40/50  Train 24.6306  Val 0.7879  0.2s
Epoch 41/50  Train 24.5581  Val 0.7876  0.3s
Epoch 42/50  Train 24.4876  Val 0.7873  0.2s
Epoch 43/50  Train 24.4190  Val 0.7870  0.2s
Epoch 44/50  Train 24.3524  Val 0.7868  0.2s
Epoch 45/50  Train 24.2877  Val 0.7865  0.2s
Epoch 46/50  Train 24.2248  Val 0.7862  0.2s
Epoch 47/50  Train 24.1637  Val 0.7860  0.2s
Epoch 48/50  Train 24.1044  Val 0.7858  0.2s
Epoch 49/50  Train 24.0469  Val 0.7855  0.2s
Epoch 50/50  Train 23.9910  Val 0.7853  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.44358512
1 day(s) RMSE                      : 8.71717953
1 day(s) R2                        : -0.01733721
1 day(s) Pearson r                 : 0.18702392
1 day(s) QLIKE                     : 0.53358636
3 day(s) MAE                       : 2.53067193
3 day(s) RMSE                      : 8.97408416
3 day(s) R2                        : -0.01906286
3 day(s) Pearson r                 : 0.23870684
3 day(s) QLIKE                     : 0.55074560
5 day(s) MAE                       : 2.55446230
5 day(s) RMSE                      : 9.02730955
5 day(s) R2                        : -0.01980834
5 day(s) Pearson r                 : 0.18352002
5 day(s) QLIKE                     : 0.55513935
10 day(s) MAE                      : 2.58105813
10 day(s) RMSE                     : 9.06859530
10 day(s) R2                       : -0.02069214
10 day(s) Pearson r                : 0.14998041
10 day(s) QLIKE                    : 0.55811205
full horizon MAE                   : 2.58105813
full horizon RMSE                  : 9.06859530
full horizon R2                    : -0.02069214
full horizon Pearson r             : 0.14998041
full horizon QLIKE                 : 0.55811205

--- Task 2 ---
1 day(s) MAE                       : 0.58801784
1 day(s) RMSE                      : 1.24304395
1 day(s) R2                        : -0.28826318
1 day(s) Pearson r                 : 0.02206237
1 day(s) QLIKE                     : 14.47466948
3 day(s) MAE                       : 0.59012405
3 day(s) RMSE                      : 1.24572343
3 day(s) R2                        : -0.28932136
3 day(s) Pearson r                 : 0.05276251
3 day(s) QLIKE                     : 13.92031835
5 day(s) MAE                       : 0.59116784
5 day(s) RMSE                      : 1.24663820
5 day(s) R2                        : -0.29009848
5 day(s) Pearson r                 : 0.01634935
5 day(s) QLIKE                     : 13.76947273
10 day(s) MAE                      : 0.59762079
10 day(s) RMSE                     : 1.25432471
10 day(s) R2                       : -0.29365152
10 day(s) Pearson r                : 0.01606620
10 day(s) QLIKE                    : 13.96813981
full horizon MAE                   : 0.59762079
full horizon RMSE                  : 1.25432471
full horizon R2                    : -0.29365152
full horizon Pearson r             : 0.01606620
full horizon QLIKE                 : 13.96813981

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/ITransformer_H10.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=2.40231, max=2.55167

=== AAPL | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -33.2571324878827
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.174045403839706
  Min value:  -2.3775104768031508
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.093969981528533
  Min value:  -1.2108152104959184
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.913020640489958
  Min value:  -1.6487782037743999
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5044175194514073
  Min value:  -2.3858091292041257
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.906568131251448
  Min value:  -2.1419453045757613
Epoch 1/50  Train 30.4838  Val 0.8062  0.4s
Epoch 2/50  Train 30.2814  Val 0.8056  0.2s
Epoch 3/50  Train 30.0939  Val 0.8049  0.2s
Epoch 4/50  Train 29.9122  Val 0.8043  0.2s
Epoch 5/50  Train 29.7358  Val 0.8038  0.2s
Epoch 6/50  Train 29.5644  Val 0.8032  0.2s
Epoch 7/50  Train 29.3977  Val 0.8027  0.2s
Epoch 8/50  Train 29.2353  Val 0.8021  0.2s
Epoch 9/50  Train 29.0770  Val 0.8016  0.2s
Epoch 10/50  Train 28.9225  Val 0.8011  0.2s
Epoch 11/50  Train 28.7715  Val 0.8006  0.2s
Epoch 12/50  Train 28.6237  Val 0.8001  0.4s
Epoch 13/50  Train 28.4790  Val 0.7997  0.3s
Epoch 14/50  Train 28.3371  Val 0.7992  0.3s
Epoch 15/50  Train 28.1978  Val 0.7987  0.3s
Epoch 16/50  Train 28.0610  Val 0.7983  0.3s
Epoch 17/50  Train 27.9265  Val 0.7978  0.3s
Epoch 18/50  Train 27.7941  Val 0.7974  0.3s
Epoch 19/50  Train 27.6637  Val 0.7970  0.3s
Epoch 20/50  Train 27.5353  Val 0.7965  0.4s
Epoch 21/50  Train 27.4086  Val 0.7961  0.2s
Epoch 22/50  Train 27.2836  Val 0.7957  0.2s
Epoch 23/50  Train 27.1602  Val 0.7953  0.2s
Epoch 24/50  Train 27.0383  Val 0.7949  0.3s
Epoch 25/50  Train 26.9179  Val 0.7945  0.3s
Epoch 26/50  Train 26.7989  Val 0.7941  0.3s
Epoch 27/50  Train 26.6813  Val 0.7937  0.3s
Epoch 28/50  Train 26.5651  Val 0.7933  0.2s
Epoch 29/50  Train 26.4502  Val 0.7929  0.3s
Epoch 30/50  Train 26.3368  Val 0.7925  0.3s
Epoch 31/50  Train 26.2247  Val 0.7921  0.3s
Epoch 32/50  Train 26.1140  Val 0.7917  0.4s
Epoch 33/50  Train 26.0048  Val 0.7914  0.3s
Epoch 34/50  Train 25.8971  Val 0.7910  0.3s
Epoch 35/50  Train 25.7909  Val 0.7907  0.2s
Epoch 36/50  Train 25.6864  Val 0.7903  0.2s
Epoch 37/50  Train 25.5836  Val 0.7899  0.2s
Epoch 38/50  Train 25.4826  Val 0.7896  0.2s
Epoch 39/50  Train 25.3835  Val 0.7893  0.2s
Epoch 40/50  Train 25.2864  Val 0.7889  0.2s
Epoch 41/50  Train 25.1913  Val 0.7886  0.2s
Epoch 42/50  Train 25.0984  Val 0.7883  0.2s
Epoch 43/50  Train 25.0077  Val 0.7880  0.2s
Epoch 44/50  Train 24.9193  Val 0.7877  0.3s
Epoch 45/50  Train 24.8333  Val 0.7874  0.3s
Epoch 46/50  Train 24.7497  Val 0.7871  0.3s
Epoch 47/50  Train 24.6686  Val 0.7868  0.3s
Epoch 48/50  Train 24.5900  Val 0.7865  0.3s
Epoch 49/50  Train 24.5140  Val 0.7863  0.2s
Epoch 50/50  Train 24.4406  Val 0.7860  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.44816797
1 day(s) RMSE                      : 8.71510547
1 day(s) R2                        : -0.01685317
1 day(s) Pearson r                 : 0.19098873
1 day(s) QLIKE                     : 0.53354865
3 day(s) MAE                       : 2.53511345
3 day(s) RMSE                      : 8.97212464
3 day(s) R2                        : -0.01861788
3 day(s) Pearson r                 : 0.22948939
3 day(s) QLIKE                     : 0.55087659
5 day(s) MAE                       : 2.55890649
5 day(s) RMSE                      : 9.02527036
5 day(s) R2                        : -0.01934766
5 day(s) Pearson r                 : 0.17730911
5 day(s) QLIKE                     : 0.55522359
10 day(s) MAE                      : 2.58530029
10 day(s) RMSE                     : 9.06650799
10 day(s) R2                       : -0.02022233
10 day(s) Pearson r                : 0.14582351
10 day(s) QLIKE                    : 0.55817178
20 day(s) MAE                      : 2.61233843
20 day(s) RMSE                     : 9.09177679
20 day(s) R2                       : -0.02155314
20 day(s) Pearson r                : 0.10634949
20 day(s) QLIKE                    : 0.55863232
full horizon MAE                   : 2.61233843
full horizon RMSE                  : 9.09177679
full horizon R2                    : -0.02155314
full horizon Pearson r             : 0.10634949
full horizon QLIKE                 : 0.55863232

--- Task 2 ---
1 day(s) MAE                       : 0.58801968
1 day(s) RMSE                      : 1.24304510
1 day(s) R2                        : -0.28826556
1 day(s) Pearson r                 : 0.01640296
1 day(s) QLIKE                     : 14.45751552
3 day(s) MAE                       : 0.59012454
3 day(s) RMSE                      : 1.24572290
3 day(s) R2                        : -0.28932026
3 day(s) Pearson r                 : 0.06044757
3 day(s) QLIKE                     : 13.89842196
5 day(s) MAE                       : 0.59116803
5 day(s) RMSE                      : 1.24663787
5 day(s) R2                        : -0.29009778
5 day(s) Pearson r                 : 0.02699362
5 day(s) QLIKE                     : 13.72607222
10 day(s) MAE                      : 0.59762076
10 day(s) RMSE                     : 1.25432436
10 day(s) R2                       : -0.29365080
10 day(s) Pearson r                : 0.02647000
10 day(s) QLIKE                    : 13.87239803
20 day(s) MAE                      : 0.60268958
20 day(s) RMSE                     : 1.25998970
20 day(s) R2                       : -0.29666513
20 day(s) Pearson r                : 0.00759941
20 day(s) QLIKE                    : 13.91005691
full horizon MAE                   : 0.60268958
full horizon RMSE                  : 1.25998970
full horizon R2                    : -0.29666513
full horizon Pearson r             : 0.00759941
full horizon QLIKE                 : 13.91005691

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/AAPL/ITransformer_H20.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=2.41599, max=2.5724

=== MSFT | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745745
  Min value:  -8.748202192355542
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.218119847017174
  Min value:  -2.8051449830585735
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2612343368490615
  Min value:  -4.427436593443818
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8947596773346853
  Min value:  -1.3150487901568413
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790657
  Min value:  -3.528134593538675
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930619914036179
  Min value:  -2.3299350512435675
Epoch 1/50  Train 25.5171  Val 0.8516  0.2s
Epoch 2/50  Train 24.8413  Val 0.8399  0.2s
Epoch 3/50  Train 24.2874  Val 0.8300  0.2s
Epoch 4/50  Train 23.8052  Val 0.8215  0.2s
Epoch 5/50  Train 23.3828  Val 0.8142  0.2s
Epoch 6/50  Train 23.0105  Val 0.8079  0.2s
Epoch 7/50  Train 22.6803  Val 0.8024  0.2s
Epoch 8/50  Train 22.3860  Val 0.7977  0.2s
Epoch 9/50  Train 22.1221  Val 0.7936  0.2s
Epoch 10/50  Train 21.8846  Val 0.7900  0.2s
Epoch 11/50  Train 21.6697  Val 0.7868  0.2s
Epoch 12/50  Train 21.4744  Val 0.7841  0.2s
Epoch 13/50  Train 21.2964  Val 0.7817  0.2s
Epoch 14/50  Train 21.1334  Val 0.7796  0.2s
Epoch 15/50  Train 20.9836  Val 0.7778  0.3s
Epoch 16/50  Train 20.8455  Val 0.7762  0.2s
Epoch 17/50  Train 20.7177  Val 0.7749  0.2s
Epoch 18/50  Train 20.5992  Val 0.7737  0.2s
Epoch 19/50  Train 20.4889  Val 0.7727  0.2s
Epoch 20/50  Train 20.3859  Val 0.7718  0.2s
Epoch 21/50  Train 20.2895  Val 0.7710  0.2s
Epoch 22/50  Train 20.1991  Val 0.7703  0.2s
Epoch 23/50  Train 20.1139  Val 0.7698  0.2s
Epoch 24/50  Train 20.0336  Val 0.7693  0.2s
Epoch 25/50  Train 19.9576  Val 0.7689  0.2s
Epoch 26/50  Train 19.8856  Val 0.7685  0.3s
Epoch 27/50  Train 19.8172  Val 0.7682  0.3s
Epoch 28/50  Train 19.7521  Val 0.7680  0.2s
Epoch 29/50  Train 19.6899  Val 0.7678  0.3s
Epoch 30/50  Train 19.6304  Val 0.7676  0.3s
Epoch 31/50  Train 19.5735  Val 0.7675  0.3s
Epoch 32/50  Train 19.5188  Val 0.7674  0.3s
Epoch 33/50  Train 19.4663  Val 0.7673  0.2s
Epoch 34/50  Train 19.4156  Val 0.7673  0.3s
Epoch 35/50  Train 19.3668  Val 0.7673  0.3s
Epoch 36/50  Train 19.3196  Val 0.7672  0.3s
Epoch 37/50  Train 19.2739  Val 0.7672  0.3s
Epoch 38/50  Train 19.2297  Val 0.7673  0.2s
Epoch 39/50  Train 19.1867  Val 0.7673  0.2s
Epoch 40/50  Train 19.1449  Val 0.7673  0.2s
Epoch 41/50  Train 19.1043  Val 0.7674  0.2s
Epoch 42/50  Train 19.0647  Val 0.7674  0.2s
Epoch 43/50  Train 19.0261  Val 0.7675  0.2s
Epoch 44/50  Train 18.9884  Val 0.7675  0.2s
Epoch 45/50  Train 18.9515  Val 0.7676  0.2s
Epoch 46/50  Train 18.9154  Val 0.7677  0.2s
Early stopping triggered.

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.93130813
1 day(s) RMSE                      : 6.64769993
1 day(s) R2                        : 0.01636499
1 day(s) Pearson r                 : 0.22558978
1 day(s) QLIKE                     : 0.41528547
full horizon MAE                   : 1.93130813
full horizon RMSE                  : 6.64769993
full horizon R2                    : 0.01636499
full horizon Pearson r             : 0.22558978
full horizon QLIKE                 : 0.41528547

--- Task 2 ---
1 day(s) MAE                       : 0.53391318
1 day(s) RMSE                      : 0.99900463
1 day(s) R2                        : -0.39964879
1 day(s) Pearson r                 : 0.00627441
1 day(s) QLIKE                     : 16.38492426
full horizon MAE                   : 0.53391318
full horizon RMSE                  : 0.99900463
full horizon R2                    : -0.39964879
full horizon Pearson r             : 0.00627441
full horizon QLIKE                 : 16.38492426

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/ITransformer_H1.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.963975, max=7.47938

=== MSFT | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745745
  Min value:  -8.748202192355542
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.218387359181361
  Min value:  -2.808479041395319
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2612343368490615
  Min value:  -4.427436593443818
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.894348562811063
  Min value:  -1.3176187186164212
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790657
  Min value:  -3.528134593538675
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930739994585198
  Min value:  -2.3330254192007547
Epoch 1/50  Train 27.9544  Val 0.9425  0.2s
Epoch 2/50  Train 27.4853  Val 0.9333  0.2s
Epoch 3/50  Train 27.0721  Val 0.9248  0.2s
Epoch 4/50  Train 26.6884  Val 0.9170  0.2s
Epoch 5/50  Train 26.3314  Val 0.9097  0.2s
Epoch 6/50  Train 25.9987  Val 0.9029  0.2s
Epoch 7/50  Train 25.6879  Val 0.8965  0.2s
Epoch 8/50  Train 25.3972  Val 0.8905  0.3s
Epoch 9/50  Train 25.1247  Val 0.8849  0.2s
Epoch 10/50  Train 24.8690  Val 0.8797  0.3s
Epoch 11/50  Train 24.6287  Val 0.8747  0.2s
Epoch 12/50  Train 24.4026  Val 0.8701  0.2s
Epoch 13/50  Train 24.1896  Val 0.8657  0.2s
Epoch 14/50  Train 23.9887  Val 0.8615  0.2s
Epoch 15/50  Train 23.7990  Val 0.8576  0.2s
Epoch 16/50  Train 23.6198  Val 0.8540  0.2s
Epoch 17/50  Train 23.4503  Val 0.8505  0.2s
Epoch 18/50  Train 23.2898  Val 0.8472  0.2s
Epoch 19/50  Train 23.1378  Val 0.8441  0.2s
Epoch 20/50  Train 22.9936  Val 0.8412  0.2s
Epoch 21/50  Train 22.8568  Val 0.8384  0.2s
Epoch 22/50  Train 22.7268  Val 0.8358  0.2s
Epoch 23/50  Train 22.6032  Val 0.8333  0.2s
Epoch 24/50  Train 22.4857  Val 0.8309  0.3s
Epoch 25/50  Train 22.3738  Val 0.8287  0.3s
Epoch 26/50  Train 22.2671  Val 0.8266  0.3s
Epoch 27/50  Train 22.1654  Val 0.8246  0.3s
Epoch 28/50  Train 22.0684  Val 0.8227  0.4s
Epoch 29/50  Train 21.9757  Val 0.8208  0.3s
Epoch 30/50  Train 21.8871  Val 0.8191  0.3s
Epoch 31/50  Train 21.8023  Val 0.8175  0.3s
Epoch 32/50  Train 21.7212  Val 0.8159  0.3s
Epoch 33/50  Train 21.6434  Val 0.8144  0.3s
Epoch 34/50  Train 21.5689  Val 0.8130  0.3s
Epoch 35/50  Train 21.4974  Val 0.8117  0.3s
Epoch 36/50  Train 21.4287  Val 0.8104  0.4s
Epoch 37/50  Train 21.3627  Val 0.8092  0.2s
Epoch 38/50  Train 21.2993  Val 0.8080  0.2s
Epoch 39/50  Train 21.2382  Val 0.8069  0.2s
Epoch 40/50  Train 21.1794  Val 0.8058  0.2s
Epoch 41/50  Train 21.1228  Val 0.8048  0.2s
Epoch 42/50  Train 21.0681  Val 0.8038  0.2s
Epoch 43/50  Train 21.0154  Val 0.8029  0.3s
Epoch 44/50  Train 20.9644  Val 0.8020  0.2s
Epoch 45/50  Train 20.9152  Val 0.8011  0.3s
Epoch 46/50  Train 20.8676  Val 0.8003  0.2s
Epoch 47/50  Train 20.8215  Val 0.7995  0.2s
Epoch 48/50  Train 20.7769  Val 0.7988  0.2s
Epoch 49/50  Train 20.7337  Val 0.7980  0.2s
Epoch 50/50  Train 20.6918  Val 0.7974  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.97627461
1 day(s) RMSE                      : 6.68501075
1 day(s) R2                        : 0.00529253
1 day(s) Pearson r                 : 0.18699502
1 day(s) QLIKE                     : 0.43123132
3 day(s) MAE                       : 1.99440687
3 day(s) RMSE                      : 6.74965851
3 day(s) R2                        : -0.01410704
3 day(s) Pearson r                 : 0.10656162
3 day(s) QLIKE                     : 0.45415782
5 day(s) MAE                       : 2.03107874
5 day(s) RMSE                      : 6.77996215
5 day(s) R2                        : -0.02311867
5 day(s) Pearson r                 : 0.06330995
5 day(s) QLIKE                     : 0.46746330
full horizon MAE                   : 2.03107874
full horizon RMSE                  : 6.77996215
full horizon R2                    : -0.02311867
full horizon Pearson r             : 0.06330995
full horizon QLIKE                 : 0.46746330

--- Task 2 ---
1 day(s) MAE                       : 0.53384152
1 day(s) RMSE                      : 0.99891076
1 day(s) R2                        : -0.39938579
1 day(s) Pearson r                 : 0.03816504
1 day(s) QLIKE                     : 16.97207801
3 day(s) MAE                       : 0.53222354
3 day(s) RMSE                      : 0.99802797
3 day(s) R2                        : -0.39719198
3 day(s) Pearson r                 : 0.01506260
3 day(s) QLIKE                     : 16.78885061
5 day(s) MAE                       : 0.53116226
5 day(s) RMSE                      : 0.99730565
5 day(s) R2                        : -0.39581962
5 day(s) Pearson r                 : 0.01332828
5 day(s) QLIKE                     : 16.81870497
full horizon MAE                   : 0.53116226
full horizon RMSE                  : 0.99730565
full horizon R2                    : -0.39581962
full horizon Pearson r             : 0.01332828
full horizon QLIKE                 : 16.81870497

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/ITransformer_H5.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.904334, max=8.09818

=== MSFT | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745745
  Min value:  -8.748202192355542
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.220877759176237
  Min value:  -2.814364260545607
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2612343368490615
  Min value:  -4.427436593443818
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.895260784986747
  Min value:  -1.3217269183172993
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790657
  Min value:  -3.528134593538675
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9328875355250386
  Min value:  -2.3383439250774902
Epoch 1/50  Train 28.0299  Val 0.9400  0.3s
Epoch 2/50  Train 27.6940  Val 0.9329  0.3s
Epoch 3/50  Train 27.3928  Val 0.9263  0.2s
Epoch 4/50  Train 27.1079  Val 0.9200  0.2s
Epoch 5/50  Train 26.8382  Val 0.9141  0.2s
Epoch 6/50  Train 26.5826  Val 0.9084  0.2s
Epoch 7/50  Train 26.3402  Val 0.9031  0.2s
Epoch 8/50  Train 26.1100  Val 0.8980  0.2s
Epoch 9/50  Train 25.8913  Val 0.8932  0.2s
Epoch 10/50  Train 25.6832  Val 0.8886  0.3s
Epoch 11/50  Train 25.4852  Val 0.8842  0.2s
Epoch 12/50  Train 25.2965  Val 0.8800  0.2s
Epoch 13/50  Train 25.1166  Val 0.8760  0.2s
Epoch 14/50  Train 24.9450  Val 0.8722  0.2s
Epoch 15/50  Train 24.7811  Val 0.8686  0.3s
Epoch 16/50  Train 24.6246  Val 0.8651  0.3s
Epoch 17/50  Train 24.4750  Val 0.8617  0.3s
Epoch 18/50  Train 24.3318  Val 0.8585  0.3s
Epoch 19/50  Train 24.1949  Val 0.8554  0.2s
Epoch 20/50  Train 24.0637  Val 0.8524  0.2s
Epoch 21/50  Train 23.9381  Val 0.8496  0.2s
Epoch 22/50  Train 23.8176  Val 0.8469  0.2s
Epoch 23/50  Train 23.7021  Val 0.8443  0.2s
Epoch 24/50  Train 23.5913  Val 0.8418  0.3s
Epoch 25/50  Train 23.4850  Val 0.8393  0.3s
Epoch 26/50  Train 23.3828  Val 0.8370  0.3s
Epoch 27/50  Train 23.2847  Val 0.8348  0.3s
Epoch 28/50  Train 23.1904  Val 0.8326  0.3s
Epoch 29/50  Train 23.0997  Val 0.8306  0.3s
Epoch 30/50  Train 23.0124  Val 0.8286  0.3s
Epoch 31/50  Train 22.9284  Val 0.8267  0.3s
Epoch 32/50  Train 22.8476  Val 0.8248  0.3s
Epoch 33/50  Train 22.7697  Val 0.8230  0.2s
Epoch 34/50  Train 22.6946  Val 0.8213  0.2s
Epoch 35/50  Train 22.6222  Val 0.8197  0.2s
Epoch 36/50  Train 22.5524  Val 0.8181  0.2s
Epoch 37/50  Train 22.4851  Val 0.8166  0.2s
Epoch 38/50  Train 22.4200  Val 0.8151  0.2s
Epoch 39/50  Train 22.3572  Val 0.8137  0.2s
Epoch 40/50  Train 22.2965  Val 0.8123  0.2s
Epoch 41/50  Train 22.2379  Val 0.8109  0.2s
Epoch 42/50  Train 22.1811  Val 0.8097  0.2s
Epoch 43/50  Train 22.1262  Val 0.8084  0.2s
Epoch 44/50  Train 22.0731  Val 0.8072  0.2s
Epoch 45/50  Train 22.0216  Val 0.8061  0.3s
Epoch 46/50  Train 21.9718  Val 0.8049  0.2s
Epoch 47/50  Train 21.9234  Val 0.8039  0.2s
Epoch 48/50  Train 21.8766  Val 0.8028  0.3s
Epoch 49/50  Train 21.8311  Val 0.8018  0.2s
Epoch 50/50  Train 21.7870  Val 0.8008  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.97860969
1 day(s) RMSE                      : 6.67293078
1 day(s) R2                        : 0.00888420
1 day(s) Pearson r                 : 0.19335122
1 day(s) QLIKE                     : 0.43269424
3 day(s) MAE                       : 2.00713042
3 day(s) RMSE                      : 6.75287876
3 day(s) R2                        : -0.01507493
3 day(s) Pearson r                 : 0.09987256
3 day(s) QLIKE                     : 0.46023647
5 day(s) MAE                       : 2.04797289
5 day(s) RMSE                      : 6.78637125
5 day(s) R2                        : -0.02505389
5 day(s) Pearson r                 : 0.05309725
5 day(s) QLIKE                     : 0.47401673
10 day(s) MAE                      : 2.07400893
10 day(s) RMSE                     : 6.81344559
10 day(s) R2                       : -0.03317338
10 day(s) Pearson r                : 0.01635198
10 day(s) QLIKE                    : 0.48986187
full horizon MAE                   : 2.07400893
full horizon RMSE                  : 6.81344559
full horizon R2                    : -0.03317338
full horizon Pearson r             : 0.01635198
full horizon QLIKE                 : 0.48986187

--- Task 2 ---
1 day(s) MAE                       : 0.53382213
1 day(s) RMSE                      : 0.99887603
1 day(s) R2                        : -0.39928849
1 day(s) Pearson r                 : 0.04221130
1 day(s) QLIKE                     : 18.17372898
3 day(s) MAE                       : 0.53220952
3 day(s) RMSE                      : 0.99800514
3 day(s) R2                        : -0.39712805
3 day(s) Pearson r                 : 0.01894964
3 day(s) QLIKE                     : 17.53100670
5 day(s) MAE                       : 0.53116278
5 day(s) RMSE                      : 0.99729538
5 day(s) R2                        : -0.39579086
5 day(s) Pearson r                 : 0.01090968
5 day(s) QLIKE                     : 17.24045445
10 day(s) MAE                      : 0.53241592
10 day(s) RMSE                     : 1.00140148
10 day(s) R2                       : -0.39386703
10 day(s) Pearson r                : -0.00824376
10 day(s) QLIKE                    : 17.50695789
full horizon MAE                   : 0.53241592
full horizon RMSE                  : 1.00140148
full horizon R2                    : -0.39386703
full horizon Pearson r             : -0.00824376
full horizon QLIKE                 : 17.50695789

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/ITransformer_H10.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.712367, max=9.86555

=== MSFT | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745745
  Min value:  -8.748202192355542
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.21818909955111
  Min value:  -2.8217406454840073
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2612343368490615
  Min value:  -4.427436593443818
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8916888397222356
  Min value:  -1.3281087285370063
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790657
  Min value:  -3.528134593538675
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9300069821373937
  Min value:  -2.3454031279530767
Epoch 1/50  Train 29.1267  Val 0.9468  0.3s
Epoch 2/50  Train 28.8411  Val 0.9398  0.2s
Epoch 3/50  Train 28.5817  Val 0.9332  0.2s
Epoch 4/50  Train 28.3338  Val 0.9269  0.3s
Epoch 5/50  Train 28.0966  Val 0.9208  0.3s
Epoch 6/50  Train 27.8695  Val 0.9151  0.3s
Epoch 7/50  Train 27.6520  Val 0.9096  0.3s
Epoch 8/50  Train 27.4436  Val 0.9043  0.2s
Epoch 9/50  Train 27.2437  Val 0.8993  0.2s
Epoch 10/50  Train 27.0519  Val 0.8944  0.2s
Epoch 11/50  Train 26.8676  Val 0.8897  0.3s
Epoch 12/50  Train 26.6906  Val 0.8852  0.2s
Epoch 13/50  Train 26.5203  Val 0.8809  0.2s
Epoch 14/50  Train 26.3565  Val 0.8767  0.3s
Epoch 15/50  Train 26.1987  Val 0.8727  0.3s
Epoch 16/50  Train 26.0468  Val 0.8688  0.2s
Epoch 17/50  Train 25.9002  Val 0.8650  0.2s
Epoch 18/50  Train 25.7589  Val 0.8614  0.2s
Epoch 19/50  Train 25.6225  Val 0.8579  0.2s
Epoch 20/50  Train 25.4908  Val 0.8544  0.3s
Epoch 21/50  Train 25.3636  Val 0.8511  0.2s
Epoch 22/50  Train 25.2406  Val 0.8479  0.3s
Epoch 23/50  Train 25.1217  Val 0.8448  0.3s
Epoch 24/50  Train 25.0067  Val 0.8417  0.4s
Epoch 25/50  Train 24.8954  Val 0.8388  0.3s
Epoch 26/50  Train 24.7876  Val 0.8359  0.3s
Epoch 27/50  Train 24.6833  Val 0.8331  0.3s
Epoch 28/50  Train 24.5822  Val 0.8304  0.4s
Epoch 29/50  Train 24.4844  Val 0.8277  0.3s
Epoch 30/50  Train 24.3895  Val 0.8251  0.3s
Epoch 31/50  Train 24.2976  Val 0.8226  0.3s
Epoch 32/50  Train 24.2085  Val 0.8202  0.3s
Epoch 33/50  Train 24.1221  Val 0.8178  0.3s
Epoch 34/50  Train 24.0384  Val 0.8154  0.2s
Epoch 35/50  Train 23.9573  Val 0.8132  0.2s
Epoch 36/50  Train 23.8786  Val 0.8109  0.2s
Epoch 37/50  Train 23.8023  Val 0.8088  0.3s
Epoch 38/50  Train 23.7283  Val 0.8067  0.3s
Epoch 39/50  Train 23.6566  Val 0.8046  0.3s
Epoch 40/50  Train 23.5871  Val 0.8026  0.3s
Epoch 41/50  Train 23.5197  Val 0.8006  0.3s
Epoch 42/50  Train 23.4545  Val 0.7987  0.4s
Epoch 43/50  Train 23.3912  Val 0.7968  0.3s
Epoch 44/50  Train 23.3299  Val 0.7950  0.2s
Epoch 45/50  Train 23.2704  Val 0.7933  0.4s
Epoch 46/50  Train 23.2129  Val 0.7915  0.3s
Epoch 47/50  Train 23.1572  Val 0.7899  0.2s
Epoch 48/50  Train 23.1032  Val 0.7882  0.2s
Epoch 49/50  Train 23.0509  Val 0.7866  0.2s
Epoch 50/50  Train 23.0003  Val 0.7851  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.00875725
1 day(s) RMSE                      : 6.71183230
1 day(s) R2                        : -0.00270540
1 day(s) Pearson r                 : 0.14173295
1 day(s) QLIKE                     : 0.44635972
3 day(s) MAE                       : 2.02714534
3 day(s) RMSE                      : 6.77199896
3 day(s) R2                        : -0.02083126
3 day(s) Pearson r                 : 0.06799888
3 day(s) QLIKE                     : 0.47016016
5 day(s) MAE                       : 2.06433190
5 day(s) RMSE                      : 6.79927618
5 day(s) R2                        : -0.02895608
5 day(s) Pearson r                 : 0.02900698
5 day(s) QLIKE                     : 0.48254484
10 day(s) MAE                      : 2.08355063
10 day(s) RMSE                     : 6.82007655
10 day(s) R2                       : -0.03518536
10 day(s) Pearson r                : 0.00250382
10 day(s) QLIKE                    : 0.49375671
20 day(s) MAE                      : 2.13999928
20 day(s) RMSE                     : 7.03238608
20 day(s) R2                       : -0.03712239
20 day(s) Pearson r                : 0.00256215
20 day(s) QLIKE                    : 0.50488663
full horizon MAE                   : 2.13999928
full horizon RMSE                  : 7.03238608
full horizon R2                    : -0.03712239
full horizon Pearson r             : 0.00256215
full horizon QLIKE                 : 0.50488663

--- Task 2 ---
1 day(s) MAE                       : 0.53387619
1 day(s) RMSE                      : 0.99894852
1 day(s) R2                        : -0.39949158
1 day(s) Pearson r                 : 0.03017229
1 day(s) QLIKE                     : 18.69979925
3 day(s) MAE                       : 0.53221993
3 day(s) RMSE                      : 0.99802182
3 day(s) R2                        : -0.39717475
3 day(s) Pearson r                 : 0.01073724
3 day(s) QLIKE                     : 17.78811653
5 day(s) MAE                       : 0.53117582
5 day(s) RMSE                      : 0.99730759
5 day(s) R2                        : -0.39582504
5 day(s) Pearson r                 : -0.00073708
5 day(s) QLIKE                     : 17.42405422
10 day(s) MAE                      : 0.53239823
10 day(s) RMSE                     : 1.00137634
10 day(s) R2                       : -0.39379703
10 day(s) Pearson r                : -0.00549544
10 day(s) QLIKE                    : 17.93553742
20 day(s) MAE                      : 0.54570849
20 day(s) RMSE                     : 1.03415264
20 day(s) R2                       : -0.38559834
20 day(s) Pearson r                : -0.00101217
20 day(s) QLIKE                    : 18.01281534
full horizon MAE                   : 0.54570849
full horizon RMSE                  : 1.03415264
full horizon R2                    : -0.38559834
full horizon Pearson r             : -0.00101217
full horizon QLIKE                 : 18.01281534

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/MSFT/ITransformer_H20.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.656947, max=9.50526

=== GE | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -3.2166288924657587
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.030029259880507
  Min value:  -2.6526564326715185
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.008530017154456
  Min value:  -3.2166288924657587
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0718644033108666
  Min value:  -2.3663072843518616
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.6741296103710765
  Min value:  -4.217627703807063
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.646703896552387
  Min value:  -2.1484208255749326
Epoch 1/50  Train 29.3338  Val 0.9223  0.2s
Epoch 2/50  Train 28.7270  Val 0.9226  0.2s
Epoch 3/50  Train 28.2121  Val 0.9229  0.2s
Epoch 4/50  Train 27.7526  Val 0.9232  0.2s
Epoch 5/50  Train 27.3401  Val 0.9234  0.2s
Epoch 6/50  Train 26.9682  Val 0.9236  0.2s
Epoch 7/50  Train 26.6317  Val 0.9238  0.3s
Epoch 8/50  Train 26.3264  Val 0.9239  0.2s
Epoch 9/50  Train 26.0486  Val 0.9241  0.2s
Epoch 10/50  Train 25.7954  Val 0.9242  0.2s
Epoch 11/50  Train 25.5642  Val 0.9243  0.2s
Epoch 12/50  Train 25.3525  Val 0.9245  0.2s
Epoch 13/50  Train 25.1585  Val 0.9246  0.2s
Epoch 14/50  Train 24.9804  Val 0.9247  0.2s
Epoch 15/50  Train 24.8167  Val 0.9248  0.2s
Epoch 16/50  Train 24.6659  Val 0.9249  0.2s
Epoch 17/50  Train 24.5269  Val 0.9250  0.2s
Epoch 18/50  Train 24.3985  Val 0.9251  0.2s
Epoch 19/50  Train 24.2799  Val 0.9252  0.2s
Epoch 20/50  Train 24.1701  Val 0.9254  0.2s
Epoch 21/50  Train 24.0683  Val 0.9255  0.2s
Epoch 22/50  Train 23.9739  Val 0.9256  0.2s
Epoch 23/50  Train 23.8862  Val 0.9257  0.2s
Epoch 24/50  Train 23.8047  Val 0.9257  0.2s
Epoch 25/50  Train 23.7289  Val 0.9258  0.2s
Epoch 26/50  Train 23.6581  Val 0.9259  0.2s
Epoch 27/50  Train 23.5922  Val 0.9260  0.2s
Epoch 28/50  Train 23.5306  Val 0.9261  0.2s
Epoch 29/50  Train 23.4730  Val 0.9262  0.2s
Epoch 30/50  Train 23.4190  Val 0.9262  0.2s
Early stopping triggered.

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.96505141
1 day(s) RMSE                      : 27.44607199
1 day(s) R2                        : -0.00145075
1 day(s) Pearson r                 : -0.02881735
1 day(s) QLIKE                     : 0.68548926
full horizon MAE                   : 4.96505141
full horizon RMSE                  : 27.44607199
full horizon R2                    : -0.00145075
full horizon Pearson r             : -0.02881735
full horizon QLIKE                 : 0.68548926

--- Task 2 ---
1 day(s) MAE                       : 0.77053005
1 day(s) RMSE                      : 1.45653670
1 day(s) R2                        : -0.38861111
1 day(s) Pearson r                 : -0.04783417
1 day(s) QLIKE                     : 18.02566618
full horizon MAE                   : 0.77053005
full horizon RMSE                  : 1.45653670
full horizon R2                    : -0.38861111
full horizon Pearson r             : -0.04783417
full horizon QLIKE                 : 18.02566618

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/ITransformer_H1.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=4.71213, max=5.16528

=== GE | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -3.2166288924657587
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.05628901565783
  Min value:  -2.6636414114466582
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.008530017154456
  Min value:  -3.2166288924657587
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0759413314511823
  Min value:  -2.376379373890814
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.6741296103710765
  Min value:  -4.217627703807063
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.658989486293699
  Min value:  -2.1577982868808494
Epoch 1/50  Train 30.2684  Val 0.9250  0.2s
Epoch 2/50  Train 29.9248  Val 0.9249  0.2s
Epoch 3/50  Train 29.6151  Val 0.9248  0.2s
Epoch 4/50  Train 29.3217  Val 0.9247  0.2s
Epoch 5/50  Train 29.0430  Val 0.9246  0.2s
Epoch 6/50  Train 28.7777  Val 0.9246  0.2s
Epoch 7/50  Train 28.5248  Val 0.9245  0.3s
Epoch 8/50  Train 28.2837  Val 0.9245  0.2s
Epoch 9/50  Train 28.0536  Val 0.9244  0.2s
Epoch 10/50  Train 27.8339  Val 0.9244  0.2s
Epoch 11/50  Train 27.6241  Val 0.9244  0.2s
Epoch 12/50  Train 27.4236  Val 0.9244  0.2s
Epoch 13/50  Train 27.2320  Val 0.9244  0.2s
Epoch 14/50  Train 27.0488  Val 0.9244  0.2s
Epoch 15/50  Train 26.8737  Val 0.9244  0.2s
Epoch 16/50  Train 26.7062  Val 0.9244  0.2s
Epoch 17/50  Train 26.5459  Val 0.9244  0.3s
Epoch 18/50  Train 26.3926  Val 0.9245  0.3s
Epoch 19/50  Train 26.2459  Val 0.9245  0.3s
Epoch 20/50  Train 26.1055  Val 0.9245  0.3s
Epoch 21/50  Train 25.9711  Val 0.9245  0.3s
Epoch 22/50  Train 25.8424  Val 0.9246  0.3s
Epoch 23/50  Train 25.7191  Val 0.9246  0.3s
Epoch 24/50  Train 25.6011  Val 0.9246  0.3s
Epoch 25/50  Train 25.4881  Val 0.9247  0.3s
Epoch 26/50  Train 25.3798  Val 0.9247  0.2s
Epoch 27/50  Train 25.2760  Val 0.9247  0.2s
Epoch 28/50  Train 25.1766  Val 0.9248  0.2s
Epoch 29/50  Train 25.0813  Val 0.9248  0.2s
Epoch 30/50  Train 24.9900  Val 0.9249  0.2s
Early stopping triggered.

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.97329695
1 day(s) RMSE                      : 27.44592959
1 day(s) R2                        : -0.00144035
1 day(s) Pearson r                 : -0.03156749
1 day(s) QLIKE                     : 0.68581265
3 day(s) MAE                       : 4.96943146
3 day(s) RMSE                      : 27.44486653
3 day(s) R2                        : -0.00142040
3 day(s) Pearson r                 : -0.01691991
3 day(s) QLIKE                     : 0.68373671
5 day(s) MAE                       : 4.96270928
5 day(s) RMSE                      : 27.44452205
5 day(s) R2                        : -0.00145262
5 day(s) Pearson r                 : -0.01765190
5 day(s) QLIKE                     : 0.68244365
full horizon MAE                   : 4.96270928
full horizon RMSE                  : 27.44452205
full horizon R2                    : -0.00145262
full horizon Pearson r             : -0.01765190
full horizon QLIKE                 : 0.68244365

--- Task 2 ---
1 day(s) MAE                       : 0.77053074
1 day(s) RMSE                      : 1.45653642
1 day(s) R2                        : -0.38861057
1 day(s) Pearson r                 : -0.04526911
1 day(s) QLIKE                     : 19.03529012
3 day(s) MAE                       : 0.77441596
3 day(s) RMSE                      : 1.45967024
3 day(s) R2                        : -0.39172243
3 day(s) Pearson r                 : 0.12716003
3 day(s) QLIKE                     : 18.03475461
5 day(s) MAE                       : 0.77519278
5 day(s) RMSE                      : 1.46029898
5 day(s) R2                        : -0.39235186
5 day(s) Pearson r                 : 0.06924680
5 day(s) QLIKE                     : 17.56052773
full horizon MAE                   : 0.77519278
full horizon RMSE                  : 1.46029898
full horizon R2                    : -0.39235186
full horizon Pearson r             : 0.06924680
full horizon QLIKE                 : 17.56052773

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/ITransformer_H5.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=4.67113, max=5.28734

=== GE | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -3.2166288924657587
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.08059546680979
  Min value:  -2.6744326686468765
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.008530017154456
  Min value:  -3.2166288924657587
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.079343715915981
  Min value:  -2.3863103666656165
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.6741296103710765
  Min value:  -4.217627703807063
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.670127332275285
  Min value:  -2.1670746942644765
Epoch 1/50  Train 30.2557  Val 0.9449  0.2s
Epoch 2/50  Train 29.9969  Val 0.9446  0.2s
Epoch 3/50  Train 29.7608  Val 0.9444  0.2s
Epoch 4/50  Train 29.5342  Val 0.9442  0.3s
Epoch 5/50  Train 29.3160  Val 0.9440  0.2s
Epoch 6/50  Train 29.1055  Val 0.9439  0.2s
Epoch 7/50  Train 28.9023  Val 0.9437  0.2s
Epoch 8/50  Train 28.7060  Val 0.9435  0.2s
Epoch 9/50  Train 28.5162  Val 0.9434  0.2s
Epoch 10/50  Train 28.3327  Val 0.9433  0.2s
Epoch 11/50  Train 28.1552  Val 0.9431  0.2s
Epoch 12/50  Train 27.9835  Val 0.9430  0.2s
Epoch 13/50  Train 27.8173  Val 0.9429  0.2s
Epoch 14/50  Train 27.6565  Val 0.9428  0.3s
Epoch 15/50  Train 27.5010  Val 0.9427  0.3s
Epoch 16/50  Train 27.3504  Val 0.9426  0.3s
Epoch 17/50  Train 27.2047  Val 0.9425  0.3s
Epoch 18/50  Train 27.0636  Val 0.9424  0.3s
Epoch 19/50  Train 26.9271  Val 0.9423  0.3s
Epoch 20/50  Train 26.7950  Val 0.9423  0.3s
Epoch 21/50  Train 26.6671  Val 0.9422  0.3s
Epoch 22/50  Train 26.5433  Val 0.9421  0.2s
Epoch 23/50  Train 26.4236  Val 0.9421  0.3s
Epoch 24/50  Train 26.3077  Val 0.9420  0.3s
Epoch 25/50  Train 26.1955  Val 0.9419  0.3s
Epoch 26/50  Train 26.0870  Val 0.9419  0.3s
Epoch 27/50  Train 25.9819  Val 0.9418  0.3s
Epoch 28/50  Train 25.8803  Val 0.9418  0.3s
Epoch 29/50  Train 25.7820  Val 0.9417  0.3s
Epoch 30/50  Train 25.6869  Val 0.9417  0.3s
Epoch 31/50  Train 25.5949  Val 0.9417  0.3s
Epoch 32/50  Train 25.5059  Val 0.9416  0.3s
Epoch 33/50  Train 25.4198  Val 0.9416  0.4s
Epoch 34/50  Train 25.3365  Val 0.9415  0.2s
Epoch 35/50  Train 25.2559  Val 0.9415  0.5s
Epoch 36/50  Train 25.1780  Val 0.9415  0.2s
Epoch 37/50  Train 25.1026  Val 0.9415  0.2s
Epoch 38/50  Train 25.0298  Val 0.9414  0.2s
Epoch 39/50  Train 24.9593  Val 0.9414  0.2s
Epoch 40/50  Train 24.8911  Val 0.9414  0.2s
Epoch 41/50  Train 24.8252  Val 0.9413  0.2s
Epoch 42/50  Train 24.7614  Val 0.9413  0.2s
Epoch 43/50  Train 24.6998  Val 0.9413  0.2s
Epoch 44/50  Train 24.6402  Val 0.9413  0.2s
Epoch 45/50  Train 24.5825  Val 0.9412  0.2s
Epoch 46/50  Train 24.5268  Val 0.9412  0.2s
Epoch 47/50  Train 24.4728  Val 0.9412  0.2s
Epoch 48/50  Train 24.4207  Val 0.9412  0.2s
Epoch 49/50  Train 24.3703  Val 0.9412  0.2s
Epoch 50/50  Train 24.3215  Val 0.9411  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.98166857
1 day(s) RMSE                      : 27.44533740
1 day(s) R2                        : -0.00139714
1 day(s) Pearson r                 : -0.03193386
1 day(s) QLIKE                     : 0.68572016
3 day(s) MAE                       : 4.97740417
3 day(s) RMSE                      : 27.44430784
3 day(s) R2                        : -0.00137963
3 day(s) Pearson r                 : -0.01658014
3 day(s) QLIKE                     : 0.68365778
5 day(s) MAE                       : 4.97070491
5 day(s) RMSE                      : 27.44398055
5 day(s) R2                        : -0.00141310
5 day(s) Pearson r                 : -0.01790147
5 day(s) QLIKE                     : 0.68238946
10 day(s) MAE                      : 4.97193386
10 day(s) RMSE                     : 27.44708005
10 day(s) R2                       : -0.00149232
10 day(s) Pearson r                : -0.01026894
10 day(s) QLIKE                    : 0.67930857
full horizon MAE                   : 4.97193386
full horizon RMSE                  : 27.44708005
full horizon R2                    : -0.00149232
full horizon Pearson r             : -0.01026894
full horizon QLIKE                 : 0.67930857

--- Task 2 ---
1 day(s) MAE                       : 0.77053042
1 day(s) RMSE                      : 1.45653646
1 day(s) R2                        : -0.38861065
1 day(s) Pearson r                 : -0.04314626
1 day(s) QLIKE                     : 18.48655255
3 day(s) MAE                       : 0.77441630
3 day(s) RMSE                      : 1.45967396
3 day(s) R2                        : -0.39172953
3 day(s) Pearson r                 : 0.08890457
3 day(s) QLIKE                     : 17.45572407
5 day(s) MAE                       : 0.77519314
5 day(s) RMSE                      : 1.46030152
5 day(s) R2                        : -0.39235672
5 day(s) Pearson r                 : 0.03635069
5 day(s) QLIKE                     : 16.99597746
10 day(s) MAE                      : 0.78128243
10 day(s) RMSE                     : 1.47245605
10 day(s) R2                       : -0.39184915
10 day(s) Pearson r                : 0.00734027
10 day(s) QLIKE                    : 16.47087276
full horizon MAE                   : 0.78128243
full horizon RMSE                  : 1.47245605
full horizon R2                    : -0.39184915
full horizon Pearson r             : 0.00734027
full horizon QLIKE                 : 16.47087276

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/ITransformer_H10.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=4.60292, max=5.26605

=== GE | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -3.2166288924657587
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.120224031597433
  Min value:  -2.6929431961408983
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.008530017154456
  Min value:  -3.2166288924657587
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.084344850046558
  Min value:  -2.4033958725509903
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.6741296103710765
  Min value:  -4.217627703807063
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.687942200751508
  Min value:  -2.1830758843283755
Epoch 1/50  Train 30.8571  Val 0.9679  0.2s
Epoch 2/50  Train 30.6482  Val 0.9676  0.2s
Epoch 3/50  Train 30.4557  Val 0.9674  0.3s
Epoch 4/50  Train 30.2689  Val 0.9671  0.2s
Epoch 5/50  Train 30.0873  Val 0.9669  0.2s
Epoch 6/50  Train 29.9102  Val 0.9667  0.2s
Epoch 7/50  Train 29.7375  Val 0.9665  0.2s
Epoch 8/50  Train 29.5689  Val 0.9663  0.2s
Epoch 9/50  Train 29.4044  Val 0.9661  0.2s
Epoch 10/50  Train 29.2436  Val 0.9659  0.2s
Epoch 11/50  Train 29.0866  Val 0.9658  0.2s
Epoch 12/50  Train 28.9332  Val 0.9656  0.2s
Epoch 13/50  Train 28.7833  Val 0.9655  0.2s
Epoch 14/50  Train 28.6368  Val 0.9653  0.2s
Epoch 15/50  Train 28.4937  Val 0.9652  0.3s
Epoch 16/50  Train 28.3538  Val 0.9650  0.2s
Epoch 17/50  Train 28.2171  Val 0.9649  0.2s
Epoch 18/50  Train 28.0835  Val 0.9648  0.2s
Epoch 19/50  Train 27.9529  Val 0.9646  0.2s
Epoch 20/50  Train 27.8253  Val 0.9645  0.2s
Epoch 21/50  Train 27.7007  Val 0.9644  0.2s
Epoch 22/50  Train 27.5789  Val 0.9643  0.2s
Epoch 23/50  Train 27.4598  Val 0.9642  0.2s
Epoch 24/50  Train 27.3436  Val 0.9641  0.2s
Epoch 25/50  Train 27.2300  Val 0.9640  0.2s
Epoch 26/50  Train 27.1190  Val 0.9639  0.3s
Epoch 27/50  Train 27.0106  Val 0.9638  0.3s
Epoch 28/50  Train 26.9048  Val 0.9638  0.3s
Epoch 29/50  Train 26.8014  Val 0.9637  0.3s
Epoch 30/50  Train 26.7005  Val 0.9636  0.3s
Epoch 31/50  Train 26.6019  Val 0.9635  0.3s
Epoch 32/50  Train 26.5057  Val 0.9635  0.3s
Epoch 33/50  Train 26.4118  Val 0.9634  0.3s
Epoch 34/50  Train 26.3202  Val 0.9633  0.3s
Epoch 35/50  Train 26.2308  Val 0.9632  0.3s
Epoch 36/50  Train 26.1435  Val 0.9632  0.2s
Epoch 37/50  Train 26.0584  Val 0.9631  0.2s
Epoch 38/50  Train 25.9754  Val 0.9631  0.2s
Epoch 39/50  Train 25.8944  Val 0.9630  0.2s
Epoch 40/50  Train 25.8154  Val 0.9630  0.2s
Epoch 41/50  Train 25.7383  Val 0.9629  0.2s
Epoch 42/50  Train 25.6632  Val 0.9628  0.2s
Epoch 43/50  Train 25.5900  Val 0.9628  0.2s
Epoch 44/50  Train 25.5187  Val 0.9627  0.2s
Epoch 45/50  Train 25.4491  Val 0.9627  0.2s
Epoch 46/50  Train 25.3813  Val 0.9626  0.2s
Epoch 47/50  Train 25.3153  Val 0.9626  0.2s
Epoch 48/50  Train 25.2510  Val 0.9626  0.2s
Epoch 49/50  Train 25.1883  Val 0.9625  0.3s
Epoch 50/50  Train 25.1272  Val 0.9625  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.99859675
1 day(s) RMSE                      : 27.44431009
1 day(s) R2                        : -0.00132217
1 day(s) Pearson r                 : -0.02946415
1 day(s) QLIKE                     : 0.68569512
3 day(s) MAE                       : 4.99358464
3 day(s) RMSE                      : 27.44337156
3 day(s) R2                        : -0.00131131
3 day(s) Pearson r                 : -0.01603747
3 day(s) QLIKE                     : 0.68367744
5 day(s) MAE                       : 4.98721778
5 day(s) RMSE                      : 27.44302088
5 day(s) R2                        : -0.00134306
5 day(s) Pearson r                 : -0.01782981
5 day(s) QLIKE                     : 0.68243152
10 day(s) MAE                      : 4.98842296
10 day(s) RMSE                     : 27.44605412
10 day(s) R2                       : -0.00141746
10 day(s) Pearson r                : -0.01033855
10 day(s) QLIKE                    : 0.67933638
20 day(s) MAE                      : 4.98557932
20 day(s) RMSE                     : 27.44791431
20 day(s) R2                       : -0.00150021
20 day(s) Pearson r                : -0.00640148
20 day(s) QLIKE                    : 0.67500374
full horizon MAE                   : 4.98557932
full horizon RMSE                  : 27.44791431
full horizon R2                    : -0.00150021
full horizon Pearson r             : -0.00640148
full horizon QLIKE                 : 0.67500374

--- Task 2 ---
1 day(s) MAE                       : 0.77053142
1 day(s) RMSE                      : 1.45653612
1 day(s) R2                        : -0.38861000
1 day(s) Pearson r                 : -0.04127292
1 day(s) QLIKE                     : 18.69435853
3 day(s) MAE                       : 0.77441637
3 day(s) RMSE                      : 1.45967253
3 day(s) R2                        : -0.39172681
3 day(s) Pearson r                 : 0.11181015
3 day(s) QLIKE                     : 17.55257058
5 day(s) MAE                       : 0.77519196
5 day(s) RMSE                      : 1.46029984
5 day(s) R2                        : -0.39235351
5 day(s) Pearson r                 : 0.02162081
5 day(s) QLIKE                     : 17.15144940
10 day(s) MAE                      : 0.78128147
10 day(s) RMSE                     : 1.47245447
10 day(s) R2                       : -0.39184616
10 day(s) Pearson r                : 0.00711622
10 day(s) QLIKE                    : 16.62448222
20 day(s) MAE                      : 0.79014844
20 day(s) RMSE                     : 1.48378844
20 day(s) R2                       : -0.39581391
20 day(s) Pearson r                : 0.00971681
20 day(s) QLIKE                    : 16.25652444
full horizon MAE                   : 0.79014844
full horizon RMSE                  : 1.48378844
full horizon R2                    : -0.39581391
full horizon Pearson r             : 0.00971681
full horizon QLIKE                 : 16.25652444

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GE/ITransformer_H20.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=4.59903, max=5.37702

=== BAC | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -9.003598937548427
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.107742786547419
  Min value:  -3.152671373228804
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -2.988682485704703
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2542888447212457
  Min value:  -1.2924015525366714
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -5.46095162532849
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.207108669727662
  Min value:  -1.9461535420618155
Epoch 1/50  Train 24.2424  Val 0.8604  0.2s
Epoch 2/50  Train 23.6341  Val 0.8520  0.2s
Epoch 3/50  Train 23.1402  Val 0.8444  0.2s
Epoch 4/50  Train 22.7081  Val 0.8377  0.2s
Epoch 5/50  Train 22.3286  Val 0.8316  0.3s
Epoch 6/50  Train 21.9937  Val 0.8261  0.2s
Epoch 7/50  Train 21.6967  Val 0.8212  0.2s
Epoch 8/50  Train 21.4318  Val 0.8167  0.2s
Epoch 9/50  Train 21.1946  Val 0.8126  0.2s
Epoch 10/50  Train 20.9809  Val 0.8088  0.2s
Epoch 11/50  Train 20.7877  Val 0.8054  0.2s
Epoch 12/50  Train 20.6122  Val 0.8023  0.2s
Epoch 13/50  Train 20.4521  Val 0.7994  0.2s
Epoch 14/50  Train 20.3055  Val 0.7968  0.2s
Epoch 15/50  Train 20.1707  Val 0.7944  0.2s
Epoch 16/50  Train 20.0463  Val 0.7922  0.2s
Epoch 17/50  Train 19.9311  Val 0.7901  0.2s
Epoch 18/50  Train 19.8241  Val 0.7883  0.2s
Epoch 19/50  Train 19.7244  Val 0.7866  0.2s
Epoch 20/50  Train 19.6311  Val 0.7850  0.2s
Epoch 21/50  Train 19.5437  Val 0.7836  0.3s
Epoch 22/50  Train 19.4615  Val 0.7823  0.2s
Epoch 23/50  Train 19.3840  Val 0.7811  0.2s
Epoch 24/50  Train 19.3107  Val 0.7800  0.2s
Epoch 25/50  Train 19.2413  Val 0.7790  0.2s
Epoch 26/50  Train 19.1755  Val 0.7781  0.2s
Epoch 27/50  Train 19.1128  Val 0.7773  0.2s
Epoch 28/50  Train 19.0530  Val 0.7765  0.2s
Epoch 29/50  Train 18.9959  Val 0.7759  0.2s
Epoch 30/50  Train 18.9412  Val 0.7752  0.2s
Epoch 31/50  Train 18.8888  Val 0.7747  0.2s
Epoch 32/50  Train 18.8385  Val 0.7742  0.2s
Epoch 33/50  Train 18.7900  Val 0.7737  0.2s
Epoch 34/50  Train 18.7434  Val 0.7733  0.2s
Epoch 35/50  Train 18.6983  Val 0.7729  0.2s
Epoch 36/50  Train 18.6548  Val 0.7726  0.2s
Epoch 37/50  Train 18.6127  Val 0.7723  0.2s
Epoch 38/50  Train 18.5719  Val 0.7720  0.2s
Epoch 39/50  Train 18.5323  Val 0.7718  0.2s
Epoch 40/50  Train 18.4938  Val 0.7716  0.2s
Epoch 41/50  Train 18.4563  Val 0.7714  0.2s
Epoch 42/50  Train 18.4199  Val 0.7712  0.2s
Epoch 43/50  Train 18.3843  Val 0.7711  0.2s
Epoch 44/50  Train 18.3496  Val 0.7710  0.2s
Epoch 45/50  Train 18.3157  Val 0.7709  0.2s
Epoch 46/50  Train 18.2825  Val 0.7708  0.2s
Epoch 47/50  Train 18.2500  Val 0.7707  0.3s
Epoch 48/50  Train 18.2181  Val 0.7707  0.4s
Epoch 49/50  Train 18.1868  Val 0.7707  0.3s
Epoch 50/50  Train 18.1561  Val 0.7706  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.13593101
1 day(s) RMSE                      : 7.60659705
1 day(s) R2                        : 0.03980931
1 day(s) Pearson r                 : 0.33805327
1 day(s) QLIKE                     : 0.34770757
full horizon MAE                   : 2.13593101
full horizon RMSE                  : 7.60659705
full horizon R2                    : 0.03980931
full horizon Pearson r             : 0.33805327
full horizon QLIKE                 : 0.34770757

--- Task 2 ---
1 day(s) MAE                       : 0.60429187
1 day(s) RMSE                      : 1.20424477
1 day(s) R2                        : -0.33640829
1 day(s) Pearson r                 : -0.04884508
1 day(s) QLIKE                     : 20.81920533
full horizon MAE                   : 0.60429187
full horizon RMSE                  : 1.20424477
full horizon R2                    : -0.33640829
full horizon Pearson r             : -0.04884508
full horizon QLIKE                 : 20.81920533

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/ITransformer_H1.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.96761, max=11.2972

=== BAC | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -9.003598937548427
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.106232115580254
  Min value:  -3.1505690923369047
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -2.988682485704703
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.253588840112363
  Min value:  -1.2911129191419828
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -5.46095162532849
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.205991919409481
  Min value:  -1.9445789696790527
Epoch 1/50  Train 26.9898  Val 0.8816  0.3s
Epoch 2/50  Train 26.5718  Val 0.8761  0.3s
Epoch 3/50  Train 26.2018  Val 0.8709  0.3s
Epoch 4/50  Train 25.8539  Val 0.8661  0.3s
Epoch 5/50  Train 25.5271  Val 0.8615  0.3s
Epoch 6/50  Train 25.2200  Val 0.8571  0.3s
Epoch 7/50  Train 24.9313  Val 0.8530  0.2s
Epoch 8/50  Train 24.6597  Val 0.8491  0.2s
Epoch 9/50  Train 24.4038  Val 0.8454  0.2s
Epoch 10/50  Train 24.1626  Val 0.8419  0.2s
Epoch 11/50  Train 23.9350  Val 0.8385  0.2s
Epoch 12/50  Train 23.7200  Val 0.8353  0.2s
Epoch 13/50  Train 23.5169  Val 0.8323  0.2s
Epoch 14/50  Train 23.3247  Val 0.8294  0.2s
Epoch 15/50  Train 23.1429  Val 0.8267  0.2s
Epoch 16/50  Train 22.9706  Val 0.8241  0.2s
Epoch 17/50  Train 22.8074  Val 0.8216  0.2s
Epoch 18/50  Train 22.6527  Val 0.8192  0.2s
Epoch 19/50  Train 22.5059  Val 0.8170  0.2s
Epoch 20/50  Train 22.3665  Val 0.8148  0.2s
Epoch 21/50  Train 22.2341  Val 0.8128  0.2s
Epoch 22/50  Train 22.1083  Val 0.8108  0.2s
Epoch 23/50  Train 21.9886  Val 0.8089  0.2s
Epoch 24/50  Train 21.8747  Val 0.8071  0.2s
Epoch 25/50  Train 21.7663  Val 0.8054  0.2s
Epoch 26/50  Train 21.6631  Val 0.8038  0.2s
Epoch 27/50  Train 21.5646  Val 0.8023  0.2s
Epoch 28/50  Train 21.4707  Val 0.8008  0.2s
Epoch 29/50  Train 21.3811  Val 0.7994  0.2s
Epoch 30/50  Train 21.2954  Val 0.7980  0.2s
Epoch 31/50  Train 21.2136  Val 0.7967  0.2s
Epoch 32/50  Train 21.1353  Val 0.7955  0.2s
Epoch 33/50  Train 21.0604  Val 0.7943  0.3s
Epoch 34/50  Train 20.9886  Val 0.7931  0.2s
Epoch 35/50  Train 20.9198  Val 0.7921  0.2s
Epoch 36/50  Train 20.8539  Val 0.7910  0.2s
Epoch 37/50  Train 20.7905  Val 0.7900  0.2s
Epoch 38/50  Train 20.7297  Val 0.7891  0.2s
Epoch 39/50  Train 20.6712  Val 0.7882  0.2s
Epoch 40/50  Train 20.6149  Val 0.7873  0.2s
Epoch 41/50  Train 20.5607  Val 0.7864  0.2s
Epoch 42/50  Train 20.5085  Val 0.7856  0.2s
Epoch 43/50  Train 20.4581  Val 0.7848  0.2s
Epoch 44/50  Train 20.4095  Val 0.7841  0.2s
Epoch 45/50  Train 20.3626  Val 0.7834  0.3s
Epoch 46/50  Train 20.3173  Val 0.7827  0.3s
Epoch 47/50  Train 20.2734  Val 0.7820  0.3s
Epoch 48/50  Train 20.2310  Val 0.7814  0.3s
Epoch 49/50  Train 20.1899  Val 0.7808  0.3s
Epoch 50/50  Train 20.1500  Val 0.7802  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.18069961
1 day(s) RMSE                      : 7.69822169
1 day(s) R2                        : 0.01653820
1 day(s) Pearson r                 : 0.28169557
1 day(s) QLIKE                     : 0.36807469
3 day(s) MAE                       : 2.20292642
3 day(s) RMSE                      : 7.75488436
3 day(s) R2                        : 0.00264463
3 day(s) Pearson r                 : 0.17817359
3 day(s) QLIKE                     : 0.38956409
5 day(s) MAE                       : 2.22102442
5 day(s) RMSE                      : 7.78239280
5 day(s) R2                        : -0.00413330
5 day(s) Pearson r                 : 0.13265113
5 day(s) QLIKE                     : 0.40056290
full horizon MAE                   : 2.22102442
full horizon RMSE                  : 7.78239280
full horizon R2                    : -0.00413330
full horizon Pearson r             : 0.13265113
full horizon QLIKE                 : 0.40056290

--- Task 2 ---
1 day(s) MAE                       : 0.60422084
1 day(s) RMSE                      : 1.20423731
1 day(s) R2                        : -0.33639171
1 day(s) Pearson r                 : -0.04287388
1 day(s) QLIKE                     : 20.27797712
3 day(s) MAE                       : 0.60850169
3 day(s) RMSE                      : 1.21034222
3 day(s) R2                        : -0.33820675
3 day(s) Pearson r                 : -0.00115862
3 day(s) QLIKE                     : 19.73329689
5 day(s) MAE                       : 0.61130957
5 day(s) RMSE                      : 1.21416267
5 day(s) R2                        : -0.33950863
5 day(s) Pearson r                 : -0.00306330
5 day(s) QLIKE                     : 19.37207439
full horizon MAE                   : 0.61130957
full horizon RMSE                  : 1.21416267
full horizon R2                    : -0.33950863
full horizon Pearson r             : -0.00306330
full horizon QLIKE                 : 19.37207439

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/ITransformer_H5.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.8675, max=11.4547

=== BAC | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -9.003598937548427
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103196331248472
  Min value:  -3.1492038976362813
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -2.988682485704703
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2515405379791398
  Min value:  -1.2907388380002114
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -5.46095162532849
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203435974440148
  Min value:  -1.943856582902401
Epoch 1/50  Train 26.8887  Val 0.8859  0.3s
Epoch 2/50  Train 26.5828  Val 0.8813  0.3s
Epoch 3/50  Train 26.3085  Val 0.8770  0.3s
Epoch 4/50  Train 26.0472  Val 0.8728  0.2s
Epoch 5/50  Train 25.7983  Val 0.8689  0.2s
Epoch 6/50  Train 25.5616  Val 0.8651  0.2s
Epoch 7/50  Train 25.3363  Val 0.8615  0.3s
Epoch 8/50  Train 25.1219  Val 0.8580  0.2s
Epoch 9/50  Train 24.9177  Val 0.8547  0.2s
Epoch 10/50  Train 24.7232  Val 0.8516  0.2s
Epoch 11/50  Train 24.5377  Val 0.8485  0.2s
Epoch 12/50  Train 24.3609  Val 0.8456  0.2s
Epoch 13/50  Train 24.1921  Val 0.8428  0.2s
Epoch 14/50  Train 24.0309  Val 0.8401  0.2s
Epoch 15/50  Train 23.8769  Val 0.8375  0.2s
Epoch 16/50  Train 23.7297  Val 0.8351  0.2s
Epoch 17/50  Train 23.5889  Val 0.8327  0.2s
Epoch 18/50  Train 23.4543  Val 0.8304  0.2s
Epoch 19/50  Train 23.3253  Val 0.8281  0.2s
Epoch 20/50  Train 23.2019  Val 0.8260  0.2s
Epoch 21/50  Train 23.0836  Val 0.8239  0.2s
Epoch 22/50  Train 22.9702  Val 0.8219  0.3s
Epoch 23/50  Train 22.8614  Val 0.8200  0.2s
Epoch 24/50  Train 22.7571  Val 0.8182  0.2s
Epoch 25/50  Train 22.6570  Val 0.8164  0.3s
Epoch 26/50  Train 22.5609  Val 0.8147  0.3s
Epoch 27/50  Train 22.4686  Val 0.8130  0.3s
Epoch 28/50  Train 22.3800  Val 0.8114  0.3s
Epoch 29/50  Train 22.2947  Val 0.8099  0.3s
Epoch 30/50  Train 22.2128  Val 0.8084  0.3s
Epoch 31/50  Train 22.1340  Val 0.8069  0.3s
Epoch 32/50  Train 22.0581  Val 0.8055  0.3s
Epoch 33/50  Train 21.9851  Val 0.8041  0.3s
Epoch 34/50  Train 21.9148  Val 0.8028  0.4s
Epoch 35/50  Train 21.8471  Val 0.8016  0.3s
Epoch 36/50  Train 21.7819  Val 0.8003  0.2s
Epoch 37/50  Train 21.7189  Val 0.7992  0.2s
Epoch 38/50  Train 21.6583  Val 0.7980  0.2s
Epoch 39/50  Train 21.5997  Val 0.7969  0.2s
Epoch 40/50  Train 21.5432  Val 0.7958  0.2s
Epoch 41/50  Train 21.4887  Val 0.7948  0.2s
Epoch 42/50  Train 21.4360  Val 0.7938  0.2s
Epoch 43/50  Train 21.3851  Val 0.7928  0.2s
Epoch 44/50  Train 21.3358  Val 0.7918  0.2s
Epoch 45/50  Train 21.2882  Val 0.7909  0.2s
Epoch 46/50  Train 21.2421  Val 0.7900  0.2s
Epoch 47/50  Train 21.1975  Val 0.7891  0.2s
Epoch 48/50  Train 21.1544  Val 0.7883  0.2s
Epoch 49/50  Train 21.1125  Val 0.7875  0.2s
Epoch 50/50  Train 21.0720  Val 0.7867  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.18332134
1 day(s) RMSE                      : 7.70133918
1 day(s) R2                        : 0.01574150
1 day(s) Pearson r                 : 0.27106080
1 day(s) QLIKE                     : 0.36964120
3 day(s) MAE                       : 2.20899261
3 day(s) RMSE                      : 7.76314754
3 day(s) R2                        : 0.00051805
3 day(s) Pearson r                 : 0.16444171
3 day(s) QLIKE                     : 0.39299769
5 day(s) MAE                       : 2.22811265
5 day(s) RMSE                      : 7.78889575
5 day(s) R2                        : -0.00581211
5 day(s) Pearson r                 : 0.12249496
5 day(s) QLIKE                     : 0.40344094
10 day(s) MAE                      : 2.24726349
10 day(s) RMSE                     : 7.81312319
10 day(s) R2                       : -0.01198299
10 day(s) Pearson r                : 0.07943902
10 day(s) QLIKE                    : 0.41222476
full horizon MAE                   : 2.24726349
full horizon RMSE                  : 7.81312319
full horizon R2                    : -0.01198299
full horizon Pearson r             : 0.07943902
full horizon QLIKE                 : 0.41222476

--- Task 2 ---
1 day(s) MAE                       : 0.60416619
1 day(s) RMSE                      : 1.20421587
1 day(s) R2                        : -0.33634413
1 day(s) Pearson r                 : -0.02856167
1 day(s) QLIKE                     : 19.11951693
3 day(s) MAE                       : 0.60869627
3 day(s) RMSE                      : 1.21043915
3 day(s) R2                        : -0.33842109
3 day(s) Pearson r                 : 0.00104090
3 day(s) QLIKE                     : 18.77999789
5 day(s) MAE                       : 0.61142407
5 day(s) RMSE                      : 1.21421389
5 day(s) R2                        : -0.33962165
5 day(s) Pearson r                 : 0.00057361
5 day(s) QLIKE                     : 18.62460078
10 day(s) MAE                      : 0.61635269
10 day(s) RMSE                     : 1.22240924
10 day(s) R2                       : -0.34024328
10 day(s) Pearson r                : 0.04033533
10 day(s) QLIKE                    : 19.00559009
full horizon MAE                   : 0.61635269
full horizon RMSE                  : 1.22240924
full horizon R2                    : -0.34024328
full horizon Pearson r             : 0.04033533
full horizon QLIKE                 : 19.00559009

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/ITransformer_H10.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.66268, max=11.0126

=== BAC | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -9.003598937548427
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103268362353512
  Min value:  -3.1487890186062453
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -2.988682485704703
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2516894965633285
  Min value:  -1.2904011693419095
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -5.46095162532849
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203545386302637
  Min value:  -1.9434917803124665
Epoch 1/50  Train 27.6664  Val 0.8980  0.2s
Epoch 2/50  Train 27.4160  Val 0.8941  0.2s
Epoch 3/50  Train 27.1897  Val 0.8903  0.2s
Epoch 4/50  Train 26.9716  Val 0.8867  0.2s
Epoch 5/50  Train 26.7616  Val 0.8832  0.3s
Epoch 6/50  Train 26.5595  Val 0.8798  0.2s
Epoch 7/50  Train 26.3651  Val 0.8765  0.3s
Epoch 8/50  Train 26.1781  Val 0.8734  0.2s
Epoch 9/50  Train 25.9982  Val 0.8703  0.2s
Epoch 10/50  Train 25.8250  Val 0.8674  0.2s
Epoch 11/50  Train 25.6583  Val 0.8645  0.2s
Epoch 12/50  Train 25.4977  Val 0.8618  0.2s
Epoch 13/50  Train 25.3430  Val 0.8591  0.2s
Epoch 14/50  Train 25.1939  Val 0.8565  0.3s
Epoch 15/50  Train 25.0501  Val 0.8540  0.3s
Epoch 16/50  Train 24.9114  Val 0.8515  0.2s
Epoch 17/50  Train 24.7775  Val 0.8492  0.3s
Epoch 18/50  Train 24.6483  Val 0.8469  0.2s
Epoch 19/50  Train 24.5234  Val 0.8446  0.2s
Epoch 20/50  Train 24.4028  Val 0.8424  0.2s
Epoch 21/50  Train 24.2863  Val 0.8403  0.2s
Epoch 22/50  Train 24.1736  Val 0.8382  0.2s
Epoch 23/50  Train 24.0645  Val 0.8362  0.2s
Epoch 24/50  Train 23.9590  Val 0.8343  0.2s
Epoch 25/50  Train 23.8569  Val 0.8323  0.2s
Epoch 26/50  Train 23.7580  Val 0.8305  0.3s
Epoch 27/50  Train 23.6623  Val 0.8286  0.3s
Epoch 28/50  Train 23.5695  Val 0.8269  0.3s
Epoch 29/50  Train 23.4796  Val 0.8251  0.3s
Epoch 30/50  Train 23.3925  Val 0.8234  0.3s
Epoch 31/50  Train 23.3080  Val 0.8218  0.3s
Epoch 32/50  Train 23.2261  Val 0.8202  0.3s
Epoch 33/50  Train 23.1467  Val 0.8186  0.3s
Epoch 34/50  Train 23.0697  Val 0.8170  0.3s
Epoch 35/50  Train 22.9949  Val 0.8155  0.2s
Epoch 36/50  Train 22.9224  Val 0.8140  0.2s
Epoch 37/50  Train 22.8520  Val 0.8126  0.2s
Epoch 38/50  Train 22.7837  Val 0.8112  0.2s
Epoch 39/50  Train 22.7174  Val 0.8098  0.2s
Epoch 40/50  Train 22.6531  Val 0.8084  0.2s
Epoch 41/50  Train 22.5907  Val 0.8071  0.2s
Epoch 42/50  Train 22.5301  Val 0.8058  0.2s
Epoch 43/50  Train 22.4712  Val 0.8045  0.2s
Epoch 44/50  Train 22.4141  Val 0.8033  0.2s
Epoch 45/50  Train 22.3586  Val 0.8021  0.2s
Epoch 46/50  Train 22.3048  Val 0.8009  0.2s
Epoch 47/50  Train 22.2526  Val 0.7997  0.2s
Epoch 48/50  Train 22.2018  Val 0.7986  0.2s
Epoch 49/50  Train 22.1526  Val 0.7975  0.2s
Epoch 50/50  Train 22.1048  Val 0.7964  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.19746740
1 day(s) RMSE                      : 7.72516176
1 day(s) R2                        : 0.00964286
1 day(s) Pearson r                 : 0.24950813
1 day(s) QLIKE                     : 0.37702621
3 day(s) MAE                       : 2.21872164
3 day(s) RMSE                      : 7.77802303
3 day(s) R2                        : -0.00331598
3 day(s) Pearson r                 : 0.13629815
3 day(s) QLIKE                     : 0.39825874
5 day(s) MAE                       : 2.23701756
5 day(s) RMSE                      : 7.79821120
5 day(s) R2                        : -0.00821943
5 day(s) Pearson r                 : 0.10292923
5 day(s) QLIKE                     : 0.40710205
10 day(s) MAE                      : 2.25251391
10 day(s) RMSE                     : 7.81726990
10 day(s) R2                       : -0.01305747
10 day(s) Pearson r                : 0.07053669
10 day(s) QLIKE                    : 0.41346666
20 day(s) MAE                      : 2.26522910
20 day(s) RMSE                     : 7.83152164
20 day(s) R2                       : -0.01708960
20 day(s) Pearson r                : 0.03913770
20 day(s) QLIKE                    : 0.41691088
full horizon MAE                   : 2.26522910
full horizon RMSE                  : 7.83152164
full horizon R2                    : -0.01708960
full horizon Pearson r             : 0.03913770
full horizon QLIKE                 : 0.41691088

--- Task 2 ---
1 day(s) MAE                       : 0.60388945
1 day(s) RMSE                      : 1.20416115
1 day(s) R2                        : -0.33622268
1 day(s) Pearson r                 : -0.02086665
1 day(s) QLIKE                     : 18.32239190
3 day(s) MAE                       : 0.60995004
3 day(s) RMSE                      : 1.21217757
3 day(s) R2                        : -0.34226831
3 day(s) Pearson r                 : 0.00083840
3 day(s) QLIKE                     : 18.23971774
5 day(s) MAE                       : 0.61216193
5 day(s) RMSE                      : 1.21523558
5 day(s) R2                        : -0.34187704
5 day(s) Pearson r                 : 0.00068338
5 day(s) QLIKE                     : 18.28225203
10 day(s) MAE                      : 0.61892908
10 day(s) RMSE                     : 1.23502464
10 day(s) R2                       : -0.36804895
10 day(s) Pearson r                : 0.04042312
10 day(s) QLIKE                    : 18.94147984
20 day(s) MAE                      : 0.62482475
20 day(s) RMSE                     : 1.23685961
20 day(s) R2                       : -0.35874658
20 day(s) Pearson r                : 0.02829382
20 day(s) QLIKE                    : 21.18845942
full horizon MAE                   : 0.62482475
full horizon RMSE                  : 1.23685961
full horizon R2                    : -0.35874658
full horizon Pearson r             : 0.02829382
full horizon QLIKE                 : 21.18845942

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BAC/ITransformer_H20.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.4288, max=12.1328

=== C | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -9.273594127665117
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.116803264609635
  Min value:  -2.5790852911299216
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.833292676987295
  Min value:  -3.3424547859577265
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6474166417153167
  Min value:  -1.6630480776200625
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.61558566904287
  Min value:  -5.593651881065264
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.113246907180985
  Min value:  -2.0589048270008967
Epoch 1/50  Train 23.6838  Val 0.7958  0.3s
Epoch 2/50  Train 23.1299  Val 0.7895  0.2s
Epoch 3/50  Train 22.6711  Val 0.7839  0.2s
Epoch 4/50  Train 22.2673  Val 0.7788  0.2s
Epoch 5/50  Train 21.9097  Val 0.7743  0.2s
Epoch 6/50  Train 21.5910  Val 0.7702  0.2s
Epoch 7/50  Train 21.3057  Val 0.7664  0.2s
Epoch 8/50  Train 21.0491  Val 0.7630  0.2s
Epoch 9/50  Train 20.8173  Val 0.7599  0.2s
Epoch 10/50  Train 20.6071  Val 0.7570  0.2s
Epoch 11/50  Train 20.4158  Val 0.7544  0.2s
Epoch 12/50  Train 20.2412  Val 0.7520  0.2s
Epoch 13/50  Train 20.0812  Val 0.7498  0.2s
Epoch 14/50  Train 19.9342  Val 0.7478  0.2s
Epoch 15/50  Train 19.7987  Val 0.7459  0.3s
Epoch 16/50  Train 19.6736  Val 0.7442  0.2s
Epoch 17/50  Train 19.5576  Val 0.7426  0.2s
Epoch 18/50  Train 19.4500  Val 0.7411  0.2s
Epoch 19/50  Train 19.3497  Val 0.7398  0.2s
Epoch 20/50  Train 19.2561  Val 0.7385  0.2s
Epoch 21/50  Train 19.1686  Val 0.7374  0.2s
Epoch 22/50  Train 19.0865  Val 0.7363  0.2s
Epoch 23/50  Train 19.0094  Val 0.7353  0.2s
Epoch 24/50  Train 18.9368  Val 0.7344  0.2s
Epoch 25/50  Train 18.8683  Val 0.7335  0.2s
Epoch 26/50  Train 18.8036  Val 0.7327  0.2s
Epoch 27/50  Train 18.7422  Val 0.7320  0.2s
Epoch 28/50  Train 18.6840  Val 0.7313  0.2s
Epoch 29/50  Train 18.6286  Val 0.7307  0.2s
Epoch 30/50  Train 18.5759  Val 0.7301  0.2s
Epoch 31/50  Train 18.5256  Val 0.7295  0.2s
Epoch 32/50  Train 18.4775  Val 0.7290  0.2s
Epoch 33/50  Train 18.4314  Val 0.7286  0.2s
Epoch 34/50  Train 18.3873  Val 0.7281  0.2s
Epoch 35/50  Train 18.3449  Val 0.7277  0.2s
Epoch 36/50  Train 18.3040  Val 0.7274  0.2s
Epoch 37/50  Train 18.2647  Val 0.7270  0.2s
Epoch 38/50  Train 18.2268  Val 0.7267  0.2s
Epoch 39/50  Train 18.1901  Val 0.7264  0.2s
Epoch 40/50  Train 18.1546  Val 0.7261  0.2s
Epoch 41/50  Train 18.1203  Val 0.7259  0.2s
Epoch 42/50  Train 18.0869  Val 0.7256  0.2s
Epoch 43/50  Train 18.0545  Val 0.7254  0.2s
Epoch 44/50  Train 18.0229  Val 0.7252  0.2s
Epoch 45/50  Train 17.9922  Val 0.7251  0.2s
Epoch 46/50  Train 17.9622  Val 0.7249  0.2s
Epoch 47/50  Train 17.9329  Val 0.7247  0.2s
Epoch 48/50  Train 17.9043  Val 0.7246  0.2s
Epoch 49/50  Train 17.8763  Val 0.7245  0.2s
Epoch 50/50  Train 17.8488  Val 0.7244  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.64588919
1 day(s) RMSE                      : 9.25764049
1 day(s) R2                        : 0.03807696
1 day(s) Pearson r                 : 0.37444665
1 day(s) QLIKE                     : 0.34633521
full horizon MAE                   : 2.64588919
full horizon RMSE                  : 9.25764049
full horizon R2                    : 0.03807696
full horizon Pearson r             : 0.37444665
full horizon QLIKE                 : 0.34633521

--- Task 2 ---
1 day(s) MAE                       : 0.70233428
1 day(s) RMSE                      : 1.31995792
1 day(s) R2                        : -0.39480781
1 day(s) Pearson r                 : -0.03998046
1 day(s) QLIKE                     : 18.45444374
full horizon MAE                   : 0.70233428
full horizon RMSE                  : 1.31995792
full horizon R2                    : -0.39480781
full horizon Pearson r             : -0.03998046
full horizon QLIKE                 : 18.45444374

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/ITransformer_H1.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=2.13532, max=14.9598

=== C | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -9.273594127665117
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.113808938949697
  Min value:  -2.5779301635708336
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.833292676987295
  Min value:  -3.3424547859577265
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.645753754809614
  Min value:  -1.6623868571045624
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.61558566904287
  Min value:  -5.593651881065264
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.110793676955011
  Min value:  -2.0580301692648475
Epoch 1/50  Train 26.2645  Val 0.8166  0.3s
Epoch 2/50  Train 25.8747  Val 0.8125  0.3s
Epoch 3/50  Train 25.5260  Val 0.8087  0.3s
Epoch 4/50  Train 25.1983  Val 0.8051  0.3s
Epoch 5/50  Train 24.8906  Val 0.8017  0.3s
Epoch 6/50  Train 24.6014  Val 0.7986  0.3s
Epoch 7/50  Train 24.3294  Val 0.7955  0.3s
Epoch 8/50  Train 24.0733  Val 0.7927  0.4s
Epoch 9/50  Train 23.8317  Val 0.7900  0.2s
Epoch 10/50  Train 23.6036  Val 0.7874  0.2s
Epoch 11/50  Train 23.3880  Val 0.7850  0.2s
Epoch 12/50  Train 23.1840  Val 0.7827  0.2s
Epoch 13/50  Train 22.9907  Val 0.7805  0.2s
Epoch 14/50  Train 22.8075  Val 0.7784  0.2s
Epoch 15/50  Train 22.6337  Val 0.7764  0.2s
Epoch 16/50  Train 22.4688  Val 0.7745  0.2s
Epoch 17/50  Train 22.3121  Val 0.7726  0.2s
Epoch 18/50  Train 22.1631  Val 0.7709  0.2s
Epoch 19/50  Train 22.0214  Val 0.7692  0.2s
Epoch 20/50  Train 21.8866  Val 0.7677  0.2s
Epoch 21/50  Train 21.7582  Val 0.7661  0.2s
Epoch 22/50  Train 21.6360  Val 0.7647  0.3s
Epoch 23/50  Train 21.5194  Val 0.7633  0.2s
Epoch 24/50  Train 21.4083  Val 0.7620  0.2s
Epoch 25/50  Train 21.3023  Val 0.7608  0.2s
Epoch 26/50  Train 21.2011  Val 0.7596  0.2s
Epoch 27/50  Train 21.1045  Val 0.7584  0.3s
Epoch 28/50  Train 21.0122  Val 0.7573  0.3s
Epoch 29/50  Train 20.9239  Val 0.7563  0.3s
Epoch 30/50  Train 20.8395  Val 0.7553  0.2s
Epoch 31/50  Train 20.7588  Val 0.7543  0.2s
Epoch 32/50  Train 20.6815  Val 0.7534  0.3s
Epoch 33/50  Train 20.6074  Val 0.7525  0.3s
Epoch 34/50  Train 20.5364  Val 0.7517  0.3s
Epoch 35/50  Train 20.4683  Val 0.7508  0.3s
Epoch 36/50  Train 20.4030  Val 0.7501  0.2s
Epoch 37/50  Train 20.3403  Val 0.7493  0.2s
Epoch 38/50  Train 20.2801  Val 0.7486  0.2s
Epoch 39/50  Train 20.2222  Val 0.7479  0.2s
Epoch 40/50  Train 20.1665  Val 0.7473  0.2s
Epoch 41/50  Train 20.1129  Val 0.7467  0.2s
Epoch 42/50  Train 20.0613  Val 0.7461  0.2s
Epoch 43/50  Train 20.0116  Val 0.7455  0.2s
Epoch 44/50  Train 19.9637  Val 0.7449  0.2s
Epoch 45/50  Train 19.9175  Val 0.7444  0.2s
Epoch 46/50  Train 19.8729  Val 0.7439  0.2s
Epoch 47/50  Train 19.8299  Val 0.7434  0.2s
Epoch 48/50  Train 19.7883  Val 0.7429  0.2s
Epoch 49/50  Train 19.7480  Val 0.7425  0.2s
Epoch 50/50  Train 19.7091  Val 0.7421  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.68962434
1 day(s) RMSE                      : 9.37915454
1 day(s) R2                        : 0.01265918
1 day(s) Pearson r                 : 0.32807740
1 day(s) QLIKE                     : 0.36368343
3 day(s) MAE                       : 2.75327557
3 day(s) RMSE                      : 9.47084849
3 day(s) R2                        : -0.00183127
3 day(s) Pearson r                 : 0.25618223
3 day(s) QLIKE                     : 0.38494366
5 day(s) MAE                       : 2.79075278
5 day(s) RMSE                      : 9.52713456
5 day(s) R2                        : -0.01054736
5 day(s) Pearson r                 : 0.21004264
5 day(s) QLIKE                     : 0.39653872
full horizon MAE                   : 2.79075278
full horizon RMSE                  : 9.52713456
full horizon R2                    : -0.01054736
full horizon Pearson r             : 0.21004264
full horizon QLIKE                 : 0.39653872

--- Task 2 ---
1 day(s) MAE                       : 0.70232243
1 day(s) RMSE                      : 1.31993940
1 day(s) R2                        : -0.39476866
1 day(s) Pearson r                 : -0.02457740
1 day(s) QLIKE                     : 18.82805847
3 day(s) MAE                       : 0.70673424
3 day(s) RMSE                      : 1.32455486
3 day(s) R2                        : -0.39784783
3 day(s) Pearson r                 : -0.01318146
3 day(s) QLIKE                     : 20.49993037
5 day(s) MAE                       : 0.70936780
5 day(s) RMSE                      : 1.32667914
5 day(s) R2                        : -0.40023425
5 day(s) Pearson r                 : -0.01194272
5 day(s) QLIKE                     : 20.00497640
full horizon MAE                   : 0.70936780
full horizon RMSE                  : 1.32667914
full horizon R2                    : -0.40023425
full horizon Pearson r             : -0.01194272
full horizon QLIKE                 : 20.00497640

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/ITransformer_H5.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=2.07223, max=13.9202

=== C | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -9.273594127665117
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.111052537752672
  Min value:  -2.5777824852265807
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.833292676987295
  Min value:  -3.3424547859577265
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6439291882044116
  Min value:  -1.6625848496872644
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.61558566904287
  Min value:  -5.593651881065264
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.108415972443237
  Min value:  -2.058078783451692
Epoch 1/50  Train 26.2970  Val 0.8227  0.2s
Epoch 2/50  Train 26.0119  Val 0.8191  0.2s
Epoch 3/50  Train 25.7533  Val 0.8158  0.2s
Epoch 4/50  Train 25.5069  Val 0.8125  0.2s
Epoch 5/50  Train 25.2723  Val 0.8095  0.2s
Epoch 6/50  Train 25.0490  Val 0.8066  0.2s
Epoch 7/50  Train 24.8365  Val 0.8038  0.2s
Epoch 8/50  Train 24.6341  Val 0.8011  0.2s
Epoch 9/50  Train 24.4411  Val 0.7986  0.2s
Epoch 10/50  Train 24.2569  Val 0.7961  0.2s
Epoch 11/50  Train 24.0811  Val 0.7938  0.2s
Epoch 12/50  Train 23.9132  Val 0.7915  0.2s
Epoch 13/50  Train 23.7526  Val 0.7894  0.3s
Epoch 14/50  Train 23.5989  Val 0.7873  0.2s
Epoch 15/50  Train 23.4517  Val 0.7853  0.2s
Epoch 16/50  Train 23.3107  Val 0.7834  0.2s
Epoch 17/50  Train 23.1756  Val 0.7815  0.2s
Epoch 18/50  Train 23.0459  Val 0.7797  0.2s
Epoch 19/50  Train 22.9215  Val 0.7780  0.2s
Epoch 20/50  Train 22.8020  Val 0.7763  0.2s
Epoch 21/50  Train 22.6873  Val 0.7747  0.2s
Epoch 22/50  Train 22.5770  Val 0.7732  0.2s
Epoch 23/50  Train 22.4709  Val 0.7717  0.3s
Epoch 24/50  Train 22.3690  Val 0.7703  0.2s
Epoch 25/50  Train 22.2708  Val 0.7689  0.2s
Epoch 26/50  Train 22.1764  Val 0.7675  0.2s
Epoch 27/50  Train 22.0854  Val 0.7662  0.2s
Epoch 28/50  Train 21.9978  Val 0.7649  0.3s
Epoch 29/50  Train 21.9134  Val 0.7637  0.3s
Epoch 30/50  Train 21.8321  Val 0.7626  0.3s
Epoch 31/50  Train 21.7536  Val 0.7614  0.2s
Epoch 32/50  Train 21.6780  Val 0.7603  0.3s
Epoch 33/50  Train 21.6050  Val 0.7592  0.3s
Epoch 34/50  Train 21.5346  Val 0.7582  0.3s
Epoch 35/50  Train 21.4667  Val 0.7572  0.3s
Epoch 36/50  Train 21.4010  Val 0.7562  0.3s
Epoch 37/50  Train 21.3377  Val 0.7553  0.3s
Epoch 38/50  Train 21.2764  Val 0.7544  0.2s
Epoch 39/50  Train 21.2173  Val 0.7535  0.2s
Epoch 40/50  Train 21.1601  Val 0.7527  0.2s
Epoch 41/50  Train 21.1047  Val 0.7518  0.2s
Epoch 42/50  Train 21.0512  Val 0.7510  0.2s
Epoch 43/50  Train 20.9995  Val 0.7503  0.2s
Epoch 44/50  Train 20.9494  Val 0.7495  0.2s
Epoch 45/50  Train 20.9008  Val 0.7488  0.2s
Epoch 46/50  Train 20.8539  Val 0.7481  0.2s
Epoch 47/50  Train 20.8083  Val 0.7474  0.2s
Epoch 48/50  Train 20.7642  Val 0.7467  0.2s
Epoch 49/50  Train 20.7215  Val 0.7461  0.2s
Epoch 50/50  Train 20.6800  Val 0.7454  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.69037872
1 day(s) RMSE                      : 9.38564782
1 day(s) R2                        : 0.01129162
1 day(s) Pearson r                 : 0.32184399
1 day(s) QLIKE                     : 0.36478074
3 day(s) MAE                       : 2.76187360
3 day(s) RMSE                      : 9.48619077
3 day(s) R2                        : -0.00507972
3 day(s) Pearson r                 : 0.23872082
3 day(s) QLIKE                     : 0.38878258
5 day(s) MAE                       : 2.79955066
5 day(s) RMSE                      : 9.53946106
5 day(s) R2                        : -0.01316400
5 day(s) Pearson r                 : 0.19726214
5 day(s) QLIKE                     : 0.39985257
10 day(s) MAE                      : 2.84440518
10 day(s) RMSE                     : 9.59662015
10 day(s) R2                       : -0.02294856
10 day(s) Pearson r                : 0.14174197
10 day(s) QLIKE                    : 0.41193269
full horizon MAE                   : 2.84440518
full horizon RMSE                  : 9.59662015
full horizon R2                    : -0.02294856
full horizon Pearson r             : 0.14174197
full horizon QLIKE                 : 0.41193269

--- Task 2 ---
1 day(s) MAE                       : 0.70232961
1 day(s) RMSE                      : 1.31990754
1 day(s) R2                        : -0.39470133
1 day(s) Pearson r                 : -0.00411324
1 day(s) QLIKE                     : 19.43378800
3 day(s) MAE                       : 0.70670024
3 day(s) RMSE                      : 1.32445608
3 day(s) R2                        : -0.39763934
3 day(s) Pearson r                 : 0.00484610
3 day(s) QLIKE                     : 19.38850505
5 day(s) MAE                       : 0.70934500
5 day(s) RMSE                      : 1.32661225
5 day(s) R2                        : -0.40009307
5 day(s) Pearson r                 : 0.00243988
5 day(s) QLIKE                     : 18.93820893
10 day(s) MAE                      : 0.71564725
10 day(s) RMSE                     : 1.33276365
10 day(s) R2                       : -0.40454283
10 day(s) Pearson r                : 0.00627965
10 day(s) QLIKE                    : 19.39781744
full horizon MAE                   : 0.71564725
full horizon RMSE                  : 1.33276365
full horizon R2                    : -0.40454283
full horizon Pearson r             : 0.00627965
full horizon QLIKE                 : 19.39781744

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/ITransformer_H10.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.88049, max=13.7505

=== C | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -9.273594127665117
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.111456003225517
  Min value:  -2.5798794252140365
Checking X_price_val:
Shape: (161, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.833292676987295
  Min value:  -3.3424547859577265
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.643530346347732
  Min value:  -1.6643841678427629
Checking X_price_test:
Shape: (404, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.61558566904287
  Min value:  -5.593651881065264
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.108493380986118
  Min value:  -2.0600067160446893
Epoch 1/50  Train 27.1941  Val 0.8313  0.3s
Epoch 2/50  Train 26.9603  Val 0.8282  0.3s
Epoch 3/50  Train 26.7462  Val 0.8252  0.2s
Epoch 4/50  Train 26.5396  Val 0.8223  0.2s
Epoch 5/50  Train 26.3405  Val 0.8195  0.3s
Epoch 6/50  Train 26.1487  Val 0.8168  0.2s
Epoch 7/50  Train 25.9639  Val 0.8143  0.2s
Epoch 8/50  Train 25.7858  Val 0.8118  0.2s
Epoch 9/50  Train 25.6142  Val 0.8093  0.2s
Epoch 10/50  Train 25.4487  Val 0.8070  0.2s
Epoch 11/50  Train 25.2890  Val 0.8047  0.3s
Epoch 12/50  Train 25.1348  Val 0.8025  0.2s
Epoch 13/50  Train 24.9859  Val 0.8004  0.2s
Epoch 14/50  Train 24.8421  Val 0.7983  0.2s
Epoch 15/50  Train 24.7030  Val 0.7963  0.2s
Epoch 16/50  Train 24.5684  Val 0.7943  0.2s
Epoch 17/50  Train 24.4383  Val 0.7924  0.2s
Epoch 18/50  Train 24.3123  Val 0.7905  0.2s
Epoch 19/50  Train 24.1902  Val 0.7887  0.2s
Epoch 20/50  Train 24.0719  Val 0.7870  0.2s
Epoch 21/50  Train 23.9573  Val 0.7852  0.2s
Epoch 22/50  Train 23.8461  Val 0.7836  0.2s
Epoch 23/50  Train 23.7383  Val 0.7819  0.2s
Epoch 24/50  Train 23.6337  Val 0.7803  0.3s
Epoch 25/50  Train 23.5321  Val 0.7788  0.2s
Epoch 26/50  Train 23.4335  Val 0.7773  0.2s
Epoch 27/50  Train 23.3377  Val 0.7758  0.3s
Epoch 28/50  Train 23.2447  Val 0.7743  0.3s
Epoch 29/50  Train 23.1543  Val 0.7729  0.3s
Epoch 30/50  Train 23.0664  Val 0.7715  0.3s
Epoch 31/50  Train 22.9810  Val 0.7702  0.3s
Epoch 32/50  Train 22.8980  Val 0.7688  0.3s
Epoch 33/50  Train 22.8172  Val 0.7675  0.3s
Epoch 34/50  Train 22.7387  Val 0.7663  0.3s
Epoch 35/50  Train 22.6624  Val 0.7650  0.3s
Epoch 36/50  Train 22.5881  Val 0.7638  0.2s
Epoch 37/50  Train 22.5158  Val 0.7626  0.2s
Epoch 38/50  Train 22.4456  Val 0.7614  0.2s
Epoch 39/50  Train 22.3772  Val 0.7603  0.2s
Epoch 40/50  Train 22.3107  Val 0.7592  0.2s
Epoch 41/50  Train 22.2460  Val 0.7581  0.2s
Epoch 42/50  Train 22.1831  Val 0.7570  0.2s
Epoch 43/50  Train 22.1219  Val 0.7560  0.2s
Epoch 44/50  Train 22.0624  Val 0.7549  0.2s
Epoch 45/50  Train 22.0045  Val 0.7539  0.2s
Epoch 46/50  Train 21.9482  Val 0.7529  0.2s
Epoch 47/50  Train 21.8935  Val 0.7520  0.2s
Epoch 48/50  Train 21.8402  Val 0.7510  0.2s
Epoch 49/50  Train 21.7885  Val 0.7501  0.2s
Epoch 50/50  Train 21.7382  Val 0.7492  0.2s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.70631479
1 day(s) RMSE                      : 9.42320368
1 day(s) R2                        : 0.00336333
1 day(s) Pearson r                 : 0.30236464
1 day(s) QLIKE                     : 0.37209230
3 day(s) MAE                       : 2.77249987
3 day(s) RMSE                      : 9.50783989
3 day(s) R2                        : -0.00967249
3 day(s) Pearson r                 : 0.21415863
3 day(s) QLIKE                     : 0.39384140
5 day(s) MAE                       : 2.80897893
5 day(s) RMSE                      : 9.55272473
5 day(s) R2                        : -0.01598337
5 day(s) Pearson r                 : 0.17910323
5 day(s) QLIKE                     : 0.40347178
10 day(s) MAE                      : 2.85058594
10 day(s) RMSE                     : 9.60195301
10 day(s) R2                       : -0.02408578
10 day(s) Pearson r                : 0.13423456
10 day(s) QLIKE                    : 0.41284053
20 day(s) MAE                      : 2.88366101
20 day(s) RMSE                     : 9.63394037
20 day(s) R2                       : -0.03010366
20 day(s) Pearson r                : 0.09118316
20 day(s) QLIKE                    : 0.41818786
full horizon MAE                   : 2.88366101
full horizon RMSE                  : 9.63394037
full horizon R2                    : -0.03010366
full horizon Pearson r             : 0.09118316
full horizon QLIKE                 : 0.41818786

--- Task 2 ---
1 day(s) MAE                       : 0.70226960
1 day(s) RMSE                      : 1.31979455
1 day(s) R2                        : -0.39446254
1 day(s) Pearson r                 : 0.02488110
1 day(s) QLIKE                     : 19.70470885
3 day(s) MAE                       : 0.70657815
3 day(s) RMSE                      : 1.32431427
3 day(s) R2                        : -0.39734007
3 day(s) Pearson r                 : 0.02169638
3 day(s) QLIKE                     : 19.24765300
5 day(s) MAE                       : 0.70927187
5 day(s) RMSE                      : 1.32651146
5 day(s) R2                        : -0.39988033
5 day(s) Pearson r                 : 0.01375201
5 day(s) QLIKE                     : 18.87363303
10 day(s) MAE                      : 0.71569802
10 day(s) RMSE                     : 1.33268300
10 day(s) R2                       : -0.40437284
10 day(s) Pearson r                : 0.00013399
10 day(s) QLIKE                    : 19.83611214
20 day(s) MAE                      : 0.72275205
20 day(s) RMSE                     : 1.34092018
20 day(s) R2                       : -0.40874050
20 day(s) Pearson r                : 0.00881202
20 day(s) QLIKE                    : 21.03369004
full horizon MAE                   : 0.72275205
full horizon RMSE                  : 1.34092018
full horizon R2                    : -0.40874050
full horizon Pearson r             : 0.00881202
full horizon QLIKE                 : 21.03369004

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/C/ITransformer_H20.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.68933, max=13.9002

=== BTCUSDT | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2399
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -13.368147249813383
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.551349524903824
  Min value:  -3.605334167767801
Checking X_price_val:
Shape: (192, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.571610898151818
  Min value:  -2.1425236768054545
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3448190912377083
  Min value:  -4.2413266524913515
Checking X_price_test:
Shape: (480, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.959482481265578
  Min value:  -2.392150617253993
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5802586573962314
  Min value:  -2.5674860022183505
Epoch 1/50  Train 30.8599  Val 1.2149  0.5s
Epoch 2/50  Train 29.9380  Val 1.2131  0.3s
Epoch 3/50  Train 29.1856  Val 1.2116  0.3s
Epoch 4/50  Train 28.5342  Val 1.2105  0.3s
Epoch 5/50  Train 27.9711  Val 1.2096  0.3s
Epoch 6/50  Train 27.4833  Val 1.2090  0.3s
Epoch 7/50  Train 27.0591  Val 1.2087  0.3s
Epoch 8/50  Train 26.6885  Val 1.2085  0.3s
Epoch 9/50  Train 26.3632  Val 1.2084  0.3s
Epoch 10/50  Train 26.0763  Val 1.2085  0.3s
Epoch 11/50  Train 25.8222  Val 1.2087  0.3s
Epoch 12/50  Train 25.5959  Val 1.2089  0.3s
Epoch 13/50  Train 25.3936  Val 1.2092  0.3s
Epoch 14/50  Train 25.2119  Val 1.2095  0.3s
Epoch 15/50  Train 25.0480  Val 1.2099  0.3s
Epoch 16/50  Train 24.8994  Val 1.2103  0.3s
Epoch 17/50  Train 24.7642  Val 1.2107  0.3s
Epoch 18/50  Train 24.6406  Val 1.2111  0.3s
Epoch 19/50  Train 24.5271  Val 1.2115  0.3s
Epoch 20/50  Train 24.4225  Val 1.2119  0.3s
Epoch 21/50  Train 24.3257  Val 1.2123  0.3s
Epoch 22/50  Train 24.2358  Val 1.2127  0.3s
Epoch 23/50  Train 24.1520  Val 1.2131  0.3s
Epoch 24/50  Train 24.0735  Val 1.2135  0.4s
Epoch 25/50  Train 23.9999  Val 1.2138  0.3s
Epoch 26/50  Train 23.9305  Val 1.2142  0.3s
Epoch 27/50  Train 23.8649  Val 1.2145  0.3s
Epoch 28/50  Train 23.8027  Val 1.2149  0.4s
Epoch 29/50  Train 23.7435  Val 1.2152  0.3s
Epoch 30/50  Train 23.6872  Val 1.2155  0.3s
Early stopping triggered.

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.56819729
1 day(s) RMSE                      : 9.13572798
1 day(s) R2                        : 0.01090433
1 day(s) Pearson r                 : 0.23279664
1 day(s) QLIKE                     : 0.36080001
full horizon MAE                   : 4.56819729
full horizon RMSE                  : 9.13572798
full horizon R2                    : 0.01090433
full horizon Pearson r             : 0.23279664
full horizon QLIKE                 : 0.36080001

--- Task 2 ---
1 day(s) MAE                       : 1.07432868
1 day(s) RMSE                      : 2.07893535
1 day(s) R2                        : -0.36418522
1 day(s) Pearson r                 : -0.02577862
1 day(s) QLIKE                     : 17.44495424
full horizon MAE                   : 1.07432868
full horizon RMSE                  : 2.07893535
full horizon R2                    : -0.36418522
full horizon Pearson r             : -0.02577862
full horizon QLIKE                 : 17.44495424

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/ITransformer_H1.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=5.36128, max=10.3

=== BTCUSDT | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2399
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -13.368147249813383
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.5541968122841485
  Min value:  -3.608723363499145
Checking X_price_val:
Shape: (192, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.571610898151818
  Min value:  -2.1425236768054545
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.345979297163924
  Min value:  -4.245202118935104
Checking X_price_test:
Shape: (480, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.959482481265578
  Min value:  -2.392150617253993
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5815988770306517
  Min value:  -2.5700816741597823
Epoch 1/50  Train 32.6186  Val 1.2527  0.5s
Epoch 2/50  Train 32.0400  Val 1.2506  0.3s
Epoch 3/50  Train 31.5298  Val 1.2485  0.3s
Epoch 4/50  Train 31.0571  Val 1.2465  0.3s
Epoch 5/50  Train 30.6203  Val 1.2447  0.3s
Epoch 6/50  Train 30.2169  Val 1.2429  0.3s
Epoch 7/50  Train 29.8438  Val 1.2412  0.3s
Epoch 8/50  Train 29.4984  Val 1.2396  0.3s
Epoch 9/50  Train 29.1782  Val 1.2382  0.3s
Epoch 10/50  Train 28.8809  Val 1.2368  0.3s
Epoch 11/50  Train 28.6047  Val 1.2355  0.3s
Epoch 12/50  Train 28.3476  Val 1.2343  0.3s
Epoch 13/50  Train 28.1082  Val 1.2332  0.3s
Epoch 14/50  Train 27.8850  Val 1.2322  0.3s
Epoch 15/50  Train 27.6766  Val 1.2313  0.3s
Epoch 16/50  Train 27.4820  Val 1.2304  0.3s
Epoch 17/50  Train 27.2999  Val 1.2296  0.3s
Epoch 18/50  Train 27.1294  Val 1.2289  0.3s
Epoch 19/50  Train 26.9697  Val 1.2282  0.3s
Epoch 20/50  Train 26.8198  Val 1.2275  0.3s
Epoch 21/50  Train 26.6790  Val 1.2270  0.3s
Epoch 22/50  Train 26.5467  Val 1.2264  0.3s
Epoch 23/50  Train 26.4222  Val 1.2259  0.3s
Epoch 24/50  Train 26.3049  Val 1.2255  0.3s
Epoch 25/50  Train 26.1943  Val 1.2250  0.3s
Epoch 26/50  Train 26.0898  Val 1.2246  0.3s
Epoch 27/50  Train 25.9911  Val 1.2243  0.3s
Epoch 28/50  Train 25.8978  Val 1.2239  0.3s
Epoch 29/50  Train 25.8094  Val 1.2236  0.3s
Epoch 30/50  Train 25.7255  Val 1.2233  0.4s
Epoch 31/50  Train 25.6460  Val 1.2231  0.3s
Epoch 32/50  Train 25.5704  Val 1.2228  0.3s
Epoch 33/50  Train 25.4986  Val 1.2226  0.3s
Epoch 34/50  Train 25.4302  Val 1.2224  0.4s
Epoch 35/50  Train 25.3650  Val 1.2222  0.3s
Epoch 36/50  Train 25.3028  Val 1.2220  0.3s
Epoch 37/50  Train 25.2434  Val 1.2218  0.3s
Epoch 38/50  Train 25.1866  Val 1.2217  0.3s
Epoch 39/50  Train 25.1323  Val 1.2215  0.3s
Epoch 40/50  Train 25.0803  Val 1.2214  0.3s
Epoch 41/50  Train 25.0305  Val 1.2213  0.3s
Epoch 42/50  Train 24.9826  Val 1.2211  0.3s
Epoch 43/50  Train 24.9367  Val 1.2210  0.3s
Epoch 44/50  Train 24.8925  Val 1.2209  0.3s
Epoch 45/50  Train 24.8500  Val 1.2208  0.3s
Epoch 46/50  Train 24.8090  Val 1.2207  0.3s
Epoch 47/50  Train 24.7695  Val 1.2206  0.3s
Epoch 48/50  Train 24.7315  Val 1.2206  0.3s
Epoch 49/50  Train 24.6947  Val 1.2205  0.3s
Epoch 50/50  Train 24.6591  Val 1.2204  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.57932620
1 day(s) RMSE                      : 9.14450809
1 day(s) R2                        : 0.00900223
1 day(s) Pearson r                 : 0.21966725
1 day(s) QLIKE                     : 0.36219083
3 day(s) MAE                       : 4.63116424
3 day(s) RMSE                      : 9.19633994
3 day(s) R2                        : -0.00212486
3 day(s) Pearson r                 : 0.14545807
3 day(s) QLIKE                     : 0.37042560
5 day(s) MAE                       : 4.65333735
5 day(s) RMSE                      : 9.22392387
5 day(s) R2                        : -0.00846164
5 day(s) Pearson r                 : 0.09558021
5 day(s) QLIKE                     : 0.37417337
full horizon MAE                   : 4.65333735
full horizon RMSE                  : 9.22392387
full horizon R2                    : -0.00846164
full horizon Pearson r             : 0.09558021
full horizon QLIKE                 : 0.37417337

--- Task 2 ---
1 day(s) MAE                       : 1.07441102
1 day(s) RMSE                      : 2.07892683
1 day(s) R2                        : -0.36417403
1 day(s) Pearson r                 : -0.02518527
1 day(s) QLIKE                     : 17.05803713
3 day(s) MAE                       : 1.07488781
3 day(s) RMSE                      : 2.07925103
3 day(s) R2                        : -0.36456769
3 day(s) Pearson r                 : -0.00351063
3 day(s) QLIKE                     : 16.77037832
5 day(s) MAE                       : 1.07514942
5 day(s) RMSE                      : 2.07933487
5 day(s) R2                        : -0.36480724
5 day(s) Pearson r                 : 0.00157542
5 day(s) QLIKE                     : 16.57784562
full horizon MAE                   : 1.07514942
full horizon RMSE                  : 2.07933487
full horizon R2                    : -0.36480724
full horizon Pearson r             : 0.00157542
full horizon QLIKE                 : 16.57784562

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/ITransformer_H5.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=5.05334, max=10.371

=== BTCUSDT | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2399
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -13.368147249813383
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.556915178048574
  Min value:  -3.6122923012225012
Checking X_price_val:
Shape: (192, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.571610898151818
  Min value:  -2.1425236768054545
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3469968336435327
  Min value:  -4.249261289934393
Checking X_price_test:
Shape: (480, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.959482481265578
  Min value:  -2.392150617253993
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5827978941292526
  Min value:  -2.57285062176008
Epoch 1/50  Train 33.2139  Val 1.2563  0.4s
Epoch 2/50  Train 32.7574  Val 1.2545  0.3s
Epoch 3/50  Train 32.3451  Val 1.2528  0.3s
Epoch 4/50  Train 31.9559  Val 1.2511  0.3s
Epoch 5/50  Train 31.5896  Val 1.2494  0.3s
Epoch 6/50  Train 31.2453  Val 1.2478  0.3s
Epoch 7/50  Train 30.9216  Val 1.2463  0.3s
Epoch 8/50  Train 30.6168  Val 1.2448  0.3s
Epoch 9/50  Train 30.3296  Val 1.2433  0.3s
Epoch 10/50  Train 30.0588  Val 1.2419  0.3s
Epoch 11/50  Train 29.8032  Val 1.2406  0.3s
Epoch 12/50  Train 29.5617  Val 1.2393  0.3s
Epoch 13/50  Train 29.3335  Val 1.2381  0.3s
Epoch 14/50  Train 29.1175  Val 1.2369  0.3s
Epoch 15/50  Train 28.9131  Val 1.2358  0.3s
Epoch 16/50  Train 28.7194  Val 1.2348  0.3s
Epoch 17/50  Train 28.5359  Val 1.2338  0.3s
Epoch 18/50  Train 28.3619  Val 1.2328  0.3s
Epoch 19/50  Train 28.1968  Val 1.2319  0.3s
Epoch 20/50  Train 28.0401  Val 1.2310  0.3s
Epoch 21/50  Train 27.8913  Val 1.2302  0.3s
Epoch 22/50  Train 27.7500  Val 1.2294  0.3s
Epoch 23/50  Train 27.6156  Val 1.2286  0.3s
Epoch 24/50  Train 27.4879  Val 1.2279  0.3s
Epoch 25/50  Train 27.3664  Val 1.2272  0.3s
Epoch 26/50  Train 27.2507  Val 1.2266  0.3s
Epoch 27/50  Train 27.1407  Val 1.2260  0.3s
Epoch 28/50  Train 27.0358  Val 1.2254  0.3s
Epoch 29/50  Train 26.9359  Val 1.2248  0.3s
Epoch 30/50  Train 26.8407  Val 1.2243  0.3s
Epoch 31/50  Train 26.7500  Val 1.2238  0.3s
Epoch 32/50  Train 26.6634  Val 1.2233  0.3s
Epoch 33/50  Train 26.5808  Val 1.2229  0.3s
Epoch 34/50  Train 26.5019  Val 1.2225  0.3s
Epoch 35/50  Train 26.4266  Val 1.2221  0.3s
Epoch 36/50  Train 26.3546  Val 1.2217  0.3s
Epoch 37/50  Train 26.2858  Val 1.2213  0.3s
Epoch 38/50  Train 26.2201  Val 1.2209  0.3s
Epoch 39/50  Train 26.1572  Val 1.2206  0.3s
Epoch 40/50  Train 26.0970  Val 1.2203  0.4s
Epoch 41/50  Train 26.0394  Val 1.2200  0.3s
Epoch 42/50  Train 25.9842  Val 1.2197  0.3s
Epoch 43/50  Train 25.9313  Val 1.2194  0.4s
Epoch 44/50  Train 25.8807  Val 1.2192  0.3s
Epoch 45/50  Train 25.8321  Val 1.2189  0.3s
Epoch 46/50  Train 25.7855  Val 1.2187  0.3s
Epoch 47/50  Train 25.7408  Val 1.2185  0.4s
Epoch 48/50  Train 25.6978  Val 1.2182  0.3s
Epoch 49/50  Train 25.6566  Val 1.2180  0.3s
Epoch 50/50  Train 25.6170  Val 1.2178  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.58544661
1 day(s) RMSE                      : 9.14835549
1 day(s) R2                        : 0.00816816
1 day(s) Pearson r                 : 0.21215352
1 day(s) QLIKE                     : 0.36296119
3 day(s) MAE                       : 4.64641993
3 day(s) RMSE                      : 9.20348794
3 day(s) R2                        : -0.00368330
3 day(s) Pearson r                 : 0.13168242
3 day(s) QLIKE                     : 0.37183984
5 day(s) MAE                       : 4.67175682
5 day(s) RMSE                      : 9.23254202
5 day(s) R2                        : -0.01034698
5 day(s) Pearson r                 : 0.07788501
5 day(s) QLIKE                     : 0.37587060
10 day(s) MAE                      : 4.69765340
10 day(s) RMSE                     : 9.26094785
10 day(s) R2                       : -0.01632901
10 day(s) Pearson r                : 0.02949930
10 day(s) QLIKE                    : 0.38041279
full horizon MAE                   : 4.69765340
full horizon RMSE                  : 9.26094785
full horizon R2                    : -0.01632901
full horizon Pearson r             : 0.02949930
full horizon QLIKE                 : 0.38041279

--- Task 2 ---
1 day(s) MAE                       : 1.07456617
1 day(s) RMSE                      : 2.07888773
1 day(s) R2                        : -0.36412272
1 day(s) Pearson r                 : -0.02377877
1 day(s) QLIKE                     : 17.19035464
3 day(s) MAE                       : 1.07493604
3 day(s) RMSE                      : 2.07923487
3 day(s) R2                        : -0.36454648
3 day(s) Pearson r                 : -0.00697546
3 day(s) QLIKE                     : 16.86138621
5 day(s) MAE                       : 1.07517958
5 day(s) RMSE                      : 2.07932597
5 day(s) R2                        : -0.36479556
5 day(s) Pearson r                 : -0.00401541
5 day(s) QLIKE                     : 16.74062254
10 day(s) MAE                      : 1.07573453
10 day(s) RMSE                     : 2.07955548
10 day(s) R2                       : -0.36523024
10 day(s) Pearson r                : -0.00020532
10 day(s) QLIKE                    : 16.56821238
full horizon MAE                   : 1.07573453
full horizon RMSE                  : 2.07955548
full horizon R2                    : -0.36523024
full horizon Pearson r             : -0.00020532
full horizon QLIKE                 : 16.56821238

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/ITransformer_H10.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=4.96807, max=10.5114

=== BTCUSDT | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 2399
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -13.368147249813383
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.564043013270618
  Min value:  -3.620214771499493
Checking X_price_val:
Shape: (192, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.571610898151818
  Min value:  -2.1425236768054545
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3500532890780494
  Min value:  -4.25835726179007
Checking X_price_test:
Shape: (480, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.959482481265578
  Min value:  -2.392150617253993
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5862887708961093
  Min value:  -2.578858106420164
Epoch 1/50  Train 34.2684  Val 1.2553  0.4s
Epoch 2/50  Train 33.8747  Val 1.2530  0.3s
Epoch 3/50  Train 33.5116  Val 1.2508  0.3s
Epoch 4/50  Train 33.1639  Val 1.2487  0.3s
Epoch 5/50  Train 32.8322  Val 1.2466  0.3s
Epoch 6/50  Train 32.5164  Val 1.2446  0.3s
Epoch 7/50  Train 32.2156  Val 1.2427  0.3s
Epoch 8/50  Train 31.9287  Val 1.2408  0.3s
Epoch 9/50  Train 31.6549  Val 1.2391  0.3s
Epoch 10/50  Train 31.3932  Val 1.2374  0.3s
Epoch 11/50  Train 31.1427  Val 1.2357  0.3s
Epoch 12/50  Train 30.9028  Val 1.2342  0.3s
Epoch 13/50  Train 30.6726  Val 1.2326  0.3s
Epoch 14/50  Train 30.4515  Val 1.2312  0.3s
Epoch 15/50  Train 30.2390  Val 1.2298  0.3s
Epoch 16/50  Train 30.0346  Val 1.2284  0.3s
Epoch 17/50  Train 29.8377  Val 1.2271  0.3s
Epoch 18/50  Train 29.6481  Val 1.2258  0.3s
Epoch 19/50  Train 29.4652  Val 1.2246  0.3s
Epoch 20/50  Train 29.2889  Val 1.2234  0.3s
Epoch 21/50  Train 29.1189  Val 1.2222  0.3s
Epoch 22/50  Train 28.9548  Val 1.2211  0.3s
Epoch 23/50  Train 28.7965  Val 1.2200  0.3s
Epoch 24/50  Train 28.6438  Val 1.2190  0.3s
Epoch 25/50  Train 28.4966  Val 1.2179  0.3s
Epoch 26/50  Train 28.3546  Val 1.2169  0.3s
Epoch 27/50  Train 28.2178  Val 1.2160  0.3s
Epoch 28/50  Train 28.0861  Val 1.2151  0.3s
Epoch 29/50  Train 27.9593  Val 1.2142  0.3s
Epoch 30/50  Train 27.8375  Val 1.2133  0.3s
Epoch 31/50  Train 27.7205  Val 1.2125  0.3s
Epoch 32/50  Train 27.6082  Val 1.2116  0.3s
Epoch 33/50  Train 27.5006  Val 1.2109  0.3s
Epoch 34/50  Train 27.3976  Val 1.2101  0.3s
Epoch 35/50  Train 27.2992  Val 1.2094  0.3s
Epoch 36/50  Train 27.2051  Val 1.2087  0.3s
Epoch 37/50  Train 27.1155  Val 1.2080  0.3s
Epoch 38/50  Train 27.0301  Val 1.2074  0.3s
Epoch 39/50  Train 26.9489  Val 1.2068  0.3s
Epoch 40/50  Train 26.8717  Val 1.2062  0.3s
Epoch 41/50  Train 26.7985  Val 1.2057  0.3s
Epoch 42/50  Train 26.7291  Val 1.2051  0.3s
Epoch 43/50  Train 26.6634  Val 1.2046  0.3s
Epoch 44/50  Train 26.6012  Val 1.2041  0.3s
Epoch 45/50  Train 26.5425  Val 1.2037  0.3s
Epoch 46/50  Train 26.4870  Val 1.2033  0.3s
Epoch 47/50  Train 26.4346  Val 1.2029  0.3s
Epoch 48/50  Train 26.3852  Val 1.2025  0.3s
Epoch 49/50  Train 26.3386  Val 1.2021  0.3s
Epoch 50/50  Train 26.2947  Val 1.2018  0.3s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.63929678
1 day(s) RMSE                      : 9.18348201
1 day(s) R2                        : 0.00053695
1 day(s) Pearson r                 : 0.16082911
1 day(s) QLIKE                     : 0.36934606
3 day(s) MAE                       : 4.68231190
3 day(s) RMSE                      : 9.22573041
3 day(s) R2                        : -0.00854045
3 day(s) Pearson r                 : 0.09061207
3 day(s) QLIKE                     : 0.37610574
5 day(s) MAE                       : 4.70119127
5 day(s) RMSE                      : 9.24766953
5 day(s) R2                        : -0.01366060
5 day(s) Pearson r                 : 0.04548436
5 day(s) QLIKE                     : 0.37889718
10 day(s) MAE                      : 4.72196144
10 day(s) RMSE                     : 9.27015628
10 day(s) R2                       : -0.01835114
10 day(s) Pearson r                : 0.00381513
10 day(s) QLIKE                    : 0.38248811
20 day(s) MAE                      : 4.74077585
20 day(s) RMSE                     : 9.29371604
20 day(s) R2                       : -0.02389550
20 day(s) Pearson r                : -0.04187069
20 day(s) QLIKE                    : 0.38518369
full horizon MAE                   : 4.74077585
full horizon RMSE                  : 9.29371604
full horizon R2                    : -0.02389550
full horizon Pearson r             : -0.04187069
full horizon QLIKE                 : 0.38518369

--- Task 2 ---
1 day(s) MAE                       : 1.07430688
1 day(s) RMSE                      : 2.07893146
1 day(s) R2                        : -0.36418011
1 day(s) Pearson r                 : -0.02172156
1 day(s) QLIKE                     : 16.41582240
3 day(s) MAE                       : 1.07485180
3 day(s) RMSE                      : 2.07925768
3 day(s) R2                        : -0.36457642
3 day(s) Pearson r                 : 0.00564538
3 day(s) QLIKE                     : 16.14046695
5 day(s) MAE                       : 1.07513051
5 day(s) RMSE                      : 2.07934401
5 day(s) R2                        : -0.36481924
5 day(s) Pearson r                 : 0.00788970
5 day(s) QLIKE                     : 16.01422074
10 day(s) MAE                      : 1.07572604
10 day(s) RMSE                     : 2.07958537
10 day(s) R2                       : -0.36526948
10 day(s) Pearson r                : -0.00262408
10 day(s) QLIKE                    : 15.87760580
20 day(s) MAE                      : 1.07565920
20 day(s) RMSE                     : 2.07745150
20 day(s) R2                       : -0.36623701
20 day(s) Pearson r                : -0.00558122
20 day(s) QLIKE                    : 15.99342530
full horizon MAE                   : 1.07565920
full horizon RMSE                  : 2.07745150
full horizon R2                    : -0.36623701
full horizon Pearson r             : -0.00558122
full horizon QLIKE                 : 15.99342530

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/BTCUSDT/ITransformer_H20.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=4.89321, max=9.37998

=== EURUSD | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -6.065318026131415
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.996396510173233
  Min value:  -3.718602223017602
Checking X_price_val:
Shape: (302, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.40892116517157
  Min value:  -3.843231093045373
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642507
  Min value:  -2.9759282116206096
Checking X_price_test:
Shape: (757, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -4.219325170615048
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316087
  Min value:  -5.3499699676080334
Epoch 1/50  Train 51.8466  Val 1.3247  0.6s
Epoch 2/50  Train 48.6439  Val 1.2399  0.4s
Epoch 3/50  Train 46.2796  Val 1.1752  0.4s
Epoch 4/50  Train 44.4880  Val 1.1251  0.5s
Epoch 5/50  Train 43.1070  Val 1.0857  0.5s
Epoch 6/50  Train 42.0202  Val 1.0541  0.5s
Epoch 7/50  Train 41.1478  Val 1.0284  0.6s
Epoch 8/50  Train 40.4344  Val 1.0072  0.5s
Epoch 9/50  Train 39.8407  Val 0.9893  0.5s
Epoch 10/50  Train 39.3385  Val 0.9742  0.4s
Epoch 11/50  Train 38.9068  Val 0.9612  0.4s
Epoch 12/50  Train 38.5304  Val 0.9499  0.4s
Epoch 13/50  Train 38.1977  Val 0.9400  0.4s
Epoch 14/50  Train 37.8999  Val 0.9312  0.4s
Epoch 15/50  Train 37.6303  Val 0.9233  0.4s
Epoch 16/50  Train 37.3839  Val 0.9163  0.4s
Epoch 17/50  Train 37.1566  Val 0.9099  0.4s
Epoch 18/50  Train 36.9453  Val 0.9040  0.4s
Epoch 19/50  Train 36.7476  Val 0.8987  0.4s
Epoch 20/50  Train 36.5616  Val 0.8938  0.4s
Epoch 21/50  Train 36.3855  Val 0.8892  0.4s
Epoch 22/50  Train 36.2181  Val 0.8850  0.5s
Epoch 23/50  Train 36.0584  Val 0.8811  0.5s
Epoch 24/50  Train 35.9054  Val 0.8775  0.5s
Epoch 25/50  Train 35.7584  Val 0.8741  0.5s
Epoch 26/50  Train 35.6167  Val 0.8708  0.5s
Epoch 27/50  Train 35.4798  Val 0.8678  0.4s
Epoch 28/50  Train 35.3471  Val 0.8650  0.4s
Epoch 29/50  Train 35.2184  Val 0.8623  0.4s
Epoch 30/50  Train 35.0932  Val 0.8598  0.4s
Epoch 31/50  Train 34.9711  Val 0.8574  0.4s
Epoch 32/50  Train 34.8520  Val 0.8551  0.4s
Epoch 33/50  Train 34.7355  Val 0.8529  0.4s
Epoch 34/50  Train 34.6215  Val 0.8509  0.4s
Epoch 35/50  Train 34.5097  Val 0.8490  0.4s
Epoch 36/50  Train 34.4000  Val 0.8471  0.4s
Epoch 37/50  Train 34.2921  Val 0.8453  0.4s
Epoch 38/50  Train 34.1860  Val 0.8437  0.4s
Epoch 39/50  Train 34.0815  Val 0.8421  0.4s
Epoch 40/50  Train 33.9784  Val 0.8406  0.4s
Epoch 41/50  Train 33.8768  Val 0.8391  0.4s
Epoch 42/50  Train 33.7764  Val 0.8378  0.4s
Epoch 43/50  Train 33.6772  Val 0.8365  0.4s
Epoch 44/50  Train 33.5790  Val 0.8352  0.4s
Epoch 45/50  Train 33.4819  Val 0.8341  0.4s
Epoch 46/50  Train 33.3858  Val 0.8330  0.4s
Epoch 47/50  Train 33.2905  Val 0.8319  0.4s
Epoch 48/50  Train 33.1960  Val 0.8309  0.4s
Epoch 49/50  Train 33.1023  Val 0.8300  0.4s
Epoch 50/50  Train 33.0093  Val 0.8291  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.10740309
1 day(s) RMSE                      : 0.21317248
1 day(s) R2                        : 0.00440063
1 day(s) Pearson r                 : 0.39703932
1 day(s) QLIKE                     : 0.40742558
full horizon MAE                   : 0.10740309
full horizon RMSE                  : 0.21317248
full horizon R2                    : 0.00440063
full horizon Pearson r             : 0.39703932
full horizon QLIKE                 : 0.40742558

--- Task 2 ---
1 day(s) MAE                       : 0.15718333
1 day(s) RMSE                      : 0.31431941
1 day(s) R2                        : -0.35386302
1 day(s) Pearson r                 : -0.01014323
1 day(s) QLIKE                     : 20.56105744
full horizon MAE                   : 0.15718333
full horizon RMSE                  : 0.31431941
full horizon R2                    : -0.35386302
full horizon Pearson r             : -0.01014323
full horizon QLIKE                 : 20.56105744

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/ITransformer_H1.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0226416, max=1.39534

=== EURUSD | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -6.065318026131415
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.997012744911091
  Min value:  -3.7204515821714925
Checking X_price_val:
Shape: (302, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.40892116517157
  Min value:  -3.843231093045373
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3194271117573377
  Min value:  -2.977504877870329
Checking X_price_test:
Shape: (757, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -4.219325170615048
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3838234229198108
  Min value:  -5.352418327671095
Epoch 1/50  Train 51.8824  Val 1.3031  0.6s
Epoch 2/50  Train 50.0269  Val 1.2519  0.4s
Epoch 3/50  Train 48.4626  Val 1.2083  0.4s
Epoch 4/50  Train 47.1263  Val 1.1710  0.4s
Epoch 5/50  Train 45.9842  Val 1.1390  0.4s
Epoch 6/50  Train 45.0035  Val 1.1114  0.4s
Epoch 7/50  Train 44.1571  Val 1.0874  0.4s
Epoch 8/50  Train 43.4225  Val 1.0664  0.4s
Epoch 9/50  Train 42.7814  Val 1.0480  0.4s
Epoch 10/50  Train 42.2188  Val 1.0318  0.4s
Epoch 11/50  Train 41.7224  Val 1.0175  0.5s
Epoch 12/50  Train 41.2820  Val 1.0047  0.5s
Epoch 13/50  Train 40.8892  Val 0.9933  0.5s
Epoch 14/50  Train 40.5371  Val 0.9830  0.6s
Epoch 15/50  Train 40.2199  Val 0.9738  0.5s
Epoch 16/50  Train 39.9327  Val 0.9654  0.5s
Epoch 17/50  Train 39.6714  Val 0.9579  0.4s
Epoch 18/50  Train 39.4327  Val 0.9509  0.4s
Epoch 19/50  Train 39.2136  Val 0.9446  0.4s
Epoch 20/50  Train 39.0117  Val 0.9388  0.4s
Epoch 21/50  Train 38.8248  Val 0.9335  0.4s
Epoch 22/50  Train 38.6512  Val 0.9286  0.4s
Epoch 23/50  Train 38.4893  Val 0.9240  0.4s
Epoch 24/50  Train 38.3378  Val 0.9198  0.4s
Epoch 25/50  Train 38.1956  Val 0.9159  0.4s
Epoch 26/50  Train 38.0616  Val 0.9122  0.4s
Epoch 27/50  Train 37.9350  Val 0.9088  0.5s
Epoch 28/50  Train 37.8151  Val 0.9055  0.5s
Epoch 29/50  Train 37.7010  Val 0.9025  0.4s
Epoch 30/50  Train 37.5923  Val 0.8996  0.4s
Epoch 31/50  Train 37.4885  Val 0.8969  0.4s
Epoch 32/50  Train 37.3890  Val 0.8944  0.4s
Epoch 33/50  Train 37.2935  Val 0.8919  0.4s
Epoch 34/50  Train 37.2016  Val 0.8896  0.4s
Epoch 35/50  Train 37.1129  Val 0.8874  0.4s
Epoch 36/50  Train 37.0273  Val 0.8853  0.4s
Epoch 37/50  Train 36.9444  Val 0.8833  0.4s
Epoch 38/50  Train 36.8640  Val 0.8814  0.4s
Epoch 39/50  Train 36.7859  Val 0.8796  0.4s
Epoch 40/50  Train 36.7098  Val 0.8778  0.4s
Epoch 41/50  Train 36.6357  Val 0.8761  0.4s
Epoch 42/50  Train 36.5634  Val 0.8745  0.4s
Epoch 43/50  Train 36.4927  Val 0.8729  0.4s
Epoch 44/50  Train 36.4235  Val 0.8714  0.4s
Epoch 45/50  Train 36.3557  Val 0.8699  0.4s
Epoch 46/50  Train 36.2891  Val 0.8685  0.4s
Epoch 47/50  Train 36.2238  Val 0.8671  0.4s
Epoch 48/50  Train 36.1595  Val 0.8658  0.4s
Epoch 49/50  Train 36.0963  Val 0.8645  0.4s
Epoch 50/50  Train 36.0340  Val 0.8633  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11069154
1 day(s) RMSE                      : 0.21376866
1 day(s) R2                        : -0.00117593
1 day(s) Pearson r                 : 0.38421541
1 day(s) QLIKE                     : 0.43660064
3 day(s) MAE                       : 0.11004906
3 day(s) RMSE                      : 0.21312944
3 day(s) R2                        : -0.00287517
3 day(s) Pearson r                 : 0.36990348
3 day(s) QLIKE                     : 0.43712543
5 day(s) MAE                       : 0.11013812
5 day(s) RMSE                      : 0.21358665
5 day(s) R2                        : -0.01862768
5 day(s) Pearson r                 : 0.35784695
5 day(s) QLIKE                     : 0.43917631
full horizon MAE                   : 0.11013812
full horizon RMSE                  : 0.21358665
full horizon R2                    : -0.01862768
full horizon Pearson r             : 0.35784695
full horizon QLIKE                 : 0.43917631

--- Task 2 ---
1 day(s) MAE                       : 0.15554622
1 day(s) RMSE                      : 0.31160090
1 day(s) R2                        : -0.33054559
1 day(s) Pearson r                 : -0.02222840
1 day(s) QLIKE                     : 17.47711197
3 day(s) MAE                       : 0.15558054
3 day(s) RMSE                      : 0.31315213
3 day(s) R2                        : -0.35985700
3 day(s) Pearson r                 : 0.04490921
3 day(s) QLIKE                     : 19.85736359
5 day(s) MAE                       : 0.15492740
5 day(s) RMSE                      : 0.31176308
5 day(s) R2                        : -0.35444635
5 day(s) Pearson r                 : 0.03787643
5 day(s) QLIKE                     : 19.37374005
full horizon MAE                   : 0.15492740
full horizon RMSE                  : 0.31176308
full horizon R2                    : -0.35444635
full horizon Pearson r             : 0.03787643
full horizon QLIKE                 : 19.37374005

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/ITransformer_H5.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0332308, max=1.59374

=== EURUSD | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -6.065318026131415
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9973267885669737
  Min value:  -3.719996871136402
Checking X_price_val:
Shape: (302, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.40892116517157
  Min value:  -3.843231093045373
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3197553444271937
  Min value:  -2.9770657245458123
Checking X_price_test:
Shape: (757, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -4.219325170615048
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3841503070956698
  Min value:  -5.351929442359245
Epoch 1/50  Train 51.9688  Val 1.3031  0.6s
Epoch 2/50  Train 50.5664  Val 1.2625  0.4s
Epoch 3/50  Train 49.3322  Val 1.2267  0.4s
Epoch 4/50  Train 48.2363  Val 1.1951  0.4s
Epoch 5/50  Train 47.2663  Val 1.1673  0.4s
Epoch 6/50  Train 46.4064  Val 1.1426  0.4s
Epoch 7/50  Train 45.6421  Val 1.1207  0.4s
Epoch 8/50  Train 44.9607  Val 1.1012  0.4s
Epoch 9/50  Train 44.3513  Val 1.0837  0.4s
Epoch 10/50  Train 43.8045  Val 1.0681  0.4s
Epoch 11/50  Train 43.3122  Val 1.0540  0.4s
Epoch 12/50  Train 42.8677  Val 1.0412  0.4s
Epoch 13/50  Train 42.4649  Val 1.0297  0.4s
Epoch 14/50  Train 42.0989  Val 1.0192  0.6s
Epoch 15/50  Train 41.7652  Val 1.0096  0.5s
Epoch 16/50  Train 41.4601  Val 1.0009  0.5s
Epoch 17/50  Train 41.1803  Val 0.9928  0.6s
Epoch 18/50  Train 40.9231  Val 0.9855  0.5s
Epoch 19/50  Train 40.6860  Val 0.9787  0.4s
Epoch 20/50  Train 40.4668  Val 0.9724  0.4s
Epoch 21/50  Train 40.2638  Val 0.9667  0.4s
Epoch 22/50  Train 40.0751  Val 0.9613  0.4s
Epoch 23/50  Train 39.8995  Val 0.9563  0.4s
Epoch 24/50  Train 39.7355  Val 0.9517  0.4s
Epoch 25/50  Train 39.5822  Val 0.9473  0.4s
Epoch 26/50  Train 39.4385  Val 0.9433  0.4s
Epoch 27/50  Train 39.3034  Val 0.9394  0.4s
Epoch 28/50  Train 39.1763  Val 0.9359  0.4s
Epoch 29/50  Train 39.0563  Val 0.9325  0.4s
Epoch 30/50  Train 38.9428  Val 0.9293  0.4s
Epoch 31/50  Train 38.8353  Val 0.9263  0.4s
Epoch 32/50  Train 38.7332  Val 0.9235  0.4s
Epoch 33/50  Train 38.6361  Val 0.9208  0.4s
Epoch 34/50  Train 38.5435  Val 0.9183  0.4s
Epoch 35/50  Train 38.4550  Val 0.9158  0.4s
Epoch 36/50  Train 38.3703  Val 0.9135  0.4s
Epoch 37/50  Train 38.2891  Val 0.9113  0.4s
Epoch 38/50  Train 38.2111  Val 0.9092  0.4s
Epoch 39/50  Train 38.1360  Val 0.9072  0.4s
Epoch 40/50  Train 38.0635  Val 0.9052  0.4s
Epoch 41/50  Train 37.9935  Val 0.9033  0.4s
Epoch 42/50  Train 37.9258  Val 0.9015  0.4s
Epoch 43/50  Train 37.8601  Val 0.8998  0.4s
Epoch 44/50  Train 37.7963  Val 0.8981  0.4s
Epoch 45/50  Train 37.7343  Val 0.8965  0.4s
Epoch 46/50  Train 37.6739  Val 0.8949  0.4s
Epoch 47/50  Train 37.6150  Val 0.8934  0.4s
Epoch 48/50  Train 37.5574  Val 0.8920  0.4s
Epoch 49/50  Train 37.5011  Val 0.8905  0.4s
Epoch 50/50  Train 37.4460  Val 0.8891  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11241824
1 day(s) RMSE                      : 0.21327612
1 day(s) R2                        : 0.00343232
1 day(s) Pearson r                 : 0.37827363
1 day(s) QLIKE                     : 0.45527922
3 day(s) MAE                       : 0.11128690
3 day(s) RMSE                      : 0.21298418
3 day(s) R2                        : -0.00150860
3 day(s) Pearson r                 : 0.36543215
3 day(s) QLIKE                     : 0.44910316
5 day(s) MAE                       : 0.11131507
5 day(s) RMSE                      : 0.21338882
5 day(s) R2                        : -0.01674159
5 day(s) Pearson r                 : 0.35254399
5 day(s) QLIKE                     : 0.45279564
10 day(s) MAE                      : 0.11222074
10 day(s) RMSE                     : 0.21664850
10 day(s) R2                       : -0.05804605
10 day(s) Pearson r                : 0.32289727
10 day(s) QLIKE                    : 0.46333618
full horizon MAE                   : 0.11222074
full horizon RMSE                  : 0.21664850
full horizon R2                    : -0.05804605
full horizon Pearson r             : 0.32289727
full horizon QLIKE                 : 0.46333618

--- Task 2 ---
1 day(s) MAE                       : 0.15561729
1 day(s) RMSE                      : 0.31160571
1 day(s) R2                        : -0.33058665
1 day(s) Pearson r                 : -0.03785973
1 day(s) QLIKE                     : 17.22103285
3 day(s) MAE                       : 0.15582354
3 day(s) RMSE                      : 0.31462757
3 day(s) R2                        : -0.37270132
3 day(s) Pearson r                 : 0.04505178
3 day(s) QLIKE                     : 19.88995231
5 day(s) MAE                       : 0.15894415
5 day(s) RMSE                      : 0.36479184
5 day(s) R2                        : -0.85439683
5 day(s) Pearson r                 : 0.01876336
5 day(s) QLIKE                     : 19.37891985
10 day(s) MAE                      : 0.15588827
10 day(s) RMSE                     : 0.33725861
10 day(s) R2                       : -0.59177874
10 day(s) Pearson r                : 0.01448535
10 day(s) QLIKE                    : 18.88679584
full horizon MAE                   : 0.15588827
full horizon RMSE                  : 0.33725861
full horizon R2                    : -0.59177874
full horizon Pearson r             : 0.01448535
full horizon QLIKE                 : 18.88679584

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/ITransformer_H10.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0371635, max=1.65787

=== EURUSD | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525526
  Min value:  -6.065318026131415
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490096
  Min value:  -3.7200073694899167
Checking X_price_val:
Shape: (302, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.40892116517157
  Min value:  -3.843231093045373
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.321220211077452
  Min value:  -2.97689474283611
Checking X_price_test:
Shape: (757, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -4.219325170615048
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737455
  Min value:  -5.35233858226595
Epoch 1/50  Train 54.0597  Val 1.3526  0.5s
Epoch 2/50  Train 52.8202  Val 1.3182  0.4s
Epoch 3/50  Train 51.6931  Val 1.2867  0.4s
Epoch 4/50  Train 50.6634  Val 1.2582  0.4s
Epoch 5/50  Train 49.7266  Val 1.2322  0.4s
Epoch 6/50  Train 48.8733  Val 1.2085  0.4s
Epoch 7/50  Train 48.0942  Val 1.1869  0.4s
Epoch 8/50  Train 47.3806  Val 1.1670  0.4s
Epoch 9/50  Train 46.7251  Val 1.1486  0.4s
Epoch 10/50  Train 46.1216  Val 1.1318  0.4s
Epoch 11/50  Train 45.5646  Val 1.1161  0.4s
Epoch 12/50  Train 45.0500  Val 1.1017  0.4s
Epoch 13/50  Train 44.5740  Val 1.0884  0.6s
Epoch 14/50  Train 44.1335  Val 1.0760  0.5s
Epoch 15/50  Train 43.7259  Val 1.0646  0.5s
Epoch 16/50  Train 43.3489  Val 1.0541  0.5s
Epoch 17/50  Train 43.0004  Val 1.0443  0.5s
Epoch 18/50  Train 42.6788  Val 1.0354  0.5s
Epoch 19/50  Train 42.3821  Val 1.0271  0.4s
Epoch 20/50  Train 42.1090  Val 1.0195  0.4s
Epoch 21/50  Train 41.8580  Val 1.0126  0.4s
Epoch 22/50  Train 41.6274  Val 1.0062  0.4s
Epoch 23/50  Train 41.4159  Val 1.0003  0.4s
Epoch 24/50  Train 41.2221  Val 0.9950  0.4s
Epoch 25/50  Train 41.0445  Val 0.9900  0.4s
Epoch 26/50  Train 40.8818  Val 0.9855  0.4s
Epoch 27/50  Train 40.7327  Val 0.9814  0.4s
Epoch 28/50  Train 40.5958  Val 0.9776  0.4s
Epoch 29/50  Train 40.4699  Val 0.9741  0.4s
Epoch 30/50  Train 40.3539  Val 0.9708  0.4s
Epoch 31/50  Train 40.2468  Val 0.9678  0.4s
Epoch 32/50  Train 40.1475  Val 0.9650  0.4s
Epoch 33/50  Train 40.0553  Val 0.9624  0.4s
Epoch 34/50  Train 39.9692  Val 0.9599  0.4s
Epoch 35/50  Train 39.8886  Val 0.9576  0.4s
Epoch 36/50  Train 39.8128  Val 0.9554  0.4s
Epoch 37/50  Train 39.7412  Val 0.9533  0.4s
Epoch 38/50  Train 39.6735  Val 0.9513  0.4s
Epoch 39/50  Train 39.6090  Val 0.9494  0.4s
Epoch 40/50  Train 39.5475  Val 0.9476  0.4s
Epoch 41/50  Train 39.4885  Val 0.9458  0.4s
Epoch 42/50  Train 39.4319  Val 0.9441  0.4s
Epoch 43/50  Train 39.3773  Val 0.9425  0.4s
Epoch 44/50  Train 39.3245  Val 0.9409  0.4s
Epoch 45/50  Train 39.2733  Val 0.9393  0.4s
Epoch 46/50  Train 39.2237  Val 0.9378  0.4s
Epoch 47/50  Train 39.1753  Val 0.9363  0.4s
Epoch 48/50  Train 39.1281  Val 0.9349  0.4s
Epoch 49/50  Train 39.0820  Val 0.9335  0.4s
Epoch 50/50  Train 39.0369  Val 0.9321  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11378915
1 day(s) RMSE                      : 0.21566529
1 day(s) R2                        : -0.01902035
1 day(s) Pearson r                 : 0.35257478
1 day(s) QLIKE                     : 0.47075334
3 day(s) MAE                       : 0.11293622
3 day(s) RMSE                      : 0.21400405
3 day(s) R2                        : -0.01112297
3 day(s) Pearson r                 : 0.35171329
3 day(s) QLIKE                     : 0.46418141
5 day(s) MAE                       : 0.11310347
5 day(s) RMSE                      : 0.21374879
5 day(s) R2                        : -0.02017480
5 day(s) Pearson r                 : 0.34083542
5 day(s) QLIKE                     : 0.47062534
10 day(s) MAE                      : 0.11369853
10 day(s) RMSE                     : 0.21565286
10 day(s) R2                       : -0.04834358
10 day(s) Pearson r                : 0.31625272
10 day(s) QLIKE                    : 0.47942926
20 day(s) MAE                      : 0.11481844
20 day(s) RMSE                     : 0.21794224
20 day(s) R2                       : -0.08334841
20 day(s) Pearson r                : 0.28339285
20 day(s) QLIKE                    : 0.48601505
full horizon MAE                   : 0.11481844
full horizon RMSE                  : 0.21794224
full horizon R2                    : -0.08334841
full horizon Pearson r             : 0.28339285
full horizon QLIKE                 : 0.48601505

--- Task 2 ---
1 day(s) MAE                       : 0.15577716
1 day(s) RMSE                      : 0.31169297
1 day(s) R2                        : -0.33133193
1 day(s) Pearson r                 : -0.01799058
1 day(s) QLIKE                     : 15.93345827
3 day(s) MAE                       : 0.15459587
3 day(s) RMSE                      : 0.30976333
3 day(s) R2                        : -0.33058466
3 day(s) Pearson r                 : 0.00610804
3 day(s) QLIKE                     : 19.19536312
5 day(s) MAE                       : 0.15409403
5 day(s) RMSE                      : 0.30893926
5 day(s) R2                        : -0.33002148
5 day(s) Pearson r                 : 0.00308210
5 day(s) QLIKE                     : 18.67517869
10 day(s) MAE                      : 0.15356561
10 day(s) RMSE                     : 0.30853669
10 day(s) R2                       : -0.33220252
10 day(s) Pearson r                : 0.00831004
10 day(s) QLIKE                    : 18.27948040
20 day(s) MAE                      : 0.16129901
20 day(s) RMSE                     : 0.99332468
20 day(s) R2                       : -12.82686556
20 day(s) Pearson r                : -0.00479153
20 day(s) QLIKE                    : 17.81236997
full horizon MAE                   : 0.16129901
full horizon RMSE                  : 0.99332468
full horizon R2                    : -12.82686556
full horizon Pearson r             : -0.00479153
full horizon QLIKE                 : 17.81236997

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/EURUSD/ITransformer_H20.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.039297, max=1.67595

=== GOLD | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 5534
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.638767766110508
  Min value:  -9.898129597066724
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.283234900000937
  Min value:  -5.250298730157774
Checking X_price_val:
Shape: (443, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -6.160721883827869
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7902797110229687
  Min value:  -2.1194734374756146
Checking X_price_test:
Shape: (1107, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.182160059445359
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5405778347636003
  Min value:  -2.3606736150182788
Epoch 1/50  Train 78.1283  Val 0.9661  0.8s
Epoch 2/50  Train 74.8692  Val 0.9403  0.6s
Epoch 3/50  Train 72.2289  Val 0.9202  0.6s
Epoch 4/50  Train 70.1923  Val 0.9044  0.6s
Epoch 5/50  Train 68.6266  Val 0.8917  0.6s
Epoch 6/50  Train 67.4021  Val 0.8813  0.6s
Epoch 7/50  Train 66.4230  Val 0.8726  0.6s
Epoch 8/50  Train 65.6218  Val 0.8652  0.6s
Epoch 9/50  Train 64.9510  Val 0.8589  0.6s
Epoch 10/50  Train 64.3770  Val 0.8533  0.8s
Epoch 11/50  Train 63.8757  Val 0.8484  0.7s
Epoch 12/50  Train 63.4299  Val 0.8440  0.8s
Epoch 13/50  Train 63.0272  Val 0.8400  0.7s
Epoch 14/50  Train 62.6583  Val 0.8364  0.6s
Epoch 15/50  Train 62.3163  Val 0.8330  0.6s
Epoch 16/50  Train 61.9962  Val 0.8299  0.6s
Epoch 17/50  Train 61.6938  Val 0.8270  0.6s
Epoch 18/50  Train 61.4063  Val 0.8243  0.7s
Epoch 19/50  Train 61.1312  Val 0.8218  0.6s
Epoch 20/50  Train 60.8667  Val 0.8194  0.6s
Epoch 21/50  Train 60.6112  Val 0.8172  0.6s
Epoch 22/50  Train 60.3636  Val 0.8151  0.6s
Epoch 23/50  Train 60.1228  Val 0.8131  0.6s
Epoch 24/50  Train 59.8880  Val 0.8112  0.6s
Epoch 25/50  Train 59.6585  Val 0.8094  0.6s
Epoch 26/50  Train 59.4338  Val 0.8076  0.6s
Epoch 27/50  Train 59.2134  Val 0.8060  0.6s
Epoch 28/50  Train 58.9969  Val 0.8044  0.6s
Epoch 29/50  Train 58.7839  Val 0.8028  0.6s
Epoch 30/50  Train 58.5741  Val 0.8014  0.6s
Epoch 31/50  Train 58.3673  Val 0.7999  0.6s
Epoch 32/50  Train 58.1633  Val 0.7986  0.6s
Epoch 33/50  Train 57.9618  Val 0.7973  0.6s
Epoch 34/50  Train 57.7627  Val 0.7960  0.6s
Epoch 35/50  Train 57.5659  Val 0.7947  0.6s
Epoch 36/50  Train 57.3711  Val 0.7935  0.6s
Epoch 37/50  Train 57.1784  Val 0.7924  0.6s
Epoch 38/50  Train 56.9875  Val 0.7912  0.6s
Epoch 39/50  Train 56.7985  Val 0.7901  0.6s
Epoch 40/50  Train 56.6112  Val 0.7891  0.6s
Epoch 41/50  Train 56.4256  Val 0.7880  0.6s
Epoch 42/50  Train 56.2415  Val 0.7870  0.6s
Epoch 43/50  Train 56.0591  Val 0.7861  0.6s
Epoch 44/50  Train 55.8781  Val 0.7851  0.6s
Epoch 45/50  Train 55.6987  Val 0.7842  0.6s
Epoch 46/50  Train 55.5206  Val 0.7833  0.6s
Epoch 47/50  Train 55.3441  Val 0.7824  0.6s
Epoch 48/50  Train 55.1689  Val 0.7816  0.6s
Epoch 49/50  Train 54.9951  Val 0.7807  0.6s
Epoch 50/50  Train 54.8227  Val 0.7799  0.6s

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.46983714
1 day(s) RMSE                      : 0.75653242
1 day(s) R2                        : -0.33276814
1 day(s) Pearson r                 : 0.31732394
1 day(s) QLIKE                     : 0.49017776
full horizon MAE                   : 0.46983714
full horizon RMSE                  : 0.75653242
full horizon R2                    : -0.33276814
full horizon Pearson r             : 0.31732394
full horizon QLIKE                 : 0.49017776

--- Task 2 ---
1 day(s) MAE                       : 0.32657440
1 day(s) RMSE                      : 0.61959390
1 day(s) R2                        : -0.38332829
1 day(s) Pearson r                 : 0.02324505
1 day(s) QLIKE                     : 20.28863665
full horizon MAE                   : 0.32657440
full horizon RMSE                  : 0.61959390
full horizon R2                    : -0.38332829
full horizon Pearson r             : 0.02324505
full horizon QLIKE                 : 20.28863665

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/ITransformer_H1.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.145447, max=1.04788

=== GOLD | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 5534
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.638767766110508
  Min value:  -9.898129597066724
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2836777458630495
  Min value:  -5.250623802304656
Checking X_price_val:
Shape: (443, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -6.160721883827869
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.790672308331636
  Min value:  -2.1194793742415055
Checking X_price_test:
Shape: (1107, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.182160059445359
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5409449791338365
  Min value:  -2.3607041381163207
Epoch 1/50  Train 82.4484  Val 0.9904  0.9s
Epoch 2/50  Train 80.1410  Val 0.9707  0.6s
Epoch 3/50  Train 77.9792  Val 0.9536  0.6s
Epoch 4/50  Train 76.0676  Val 0.9390  0.6s
Epoch 5/50  Train 74.4107  Val 0.9265  0.6s
Epoch 6/50  Train 72.9787  Val 0.9156  0.6s
Epoch 7/50  Train 71.7381  Val 0.9062  0.6s
Epoch 8/50  Train 70.6588  Val 0.8978  0.6s
Epoch 9/50  Train 69.7151  Val 0.8905  0.8s
Epoch 10/50  Train 68.8855  Val 0.8840  0.8s
Epoch 11/50  Train 68.1522  Val 0.8782  0.7s
Epoch 12/50  Train 67.5004  Val 0.8730  0.8s
Epoch 13/50  Train 66.9179  Val 0.8683  0.6s
Epoch 14/50  Train 66.3943  Val 0.8640  0.6s
Epoch 15/50  Train 65.9213  Val 0.8602  0.6s
Epoch 16/50  Train 65.4917  Val 0.8567  0.6s
Epoch 17/50  Train 65.0996  Val 0.8535  0.6s
Epoch 18/50  Train 64.7399  Val 0.8506  0.6s
Epoch 19/50  Train 64.4085  Val 0.8479  0.7s
Epoch 20/50  Train 64.1016  Val 0.8454  0.6s
Epoch 21/50  Train 63.8164  Val 0.8431  0.6s
Epoch 22/50  Train 63.5501  Val 0.8410  0.6s
Epoch 23/50  Train 63.3005  Val 0.8390  0.6s
Epoch 24/50  Train 63.0658  Val 0.8372  0.6s
Epoch 25/50  Train 62.8442  Val 0.8355  0.6s
Epoch 26/50  Train 62.6343  Val 0.8339  0.6s
Epoch 27/50  Train 62.4350  Val 0.8324  0.7s
Epoch 28/50  Train 62.2450  Val 0.8310  0.6s
Epoch 29/50  Train 62.0635  Val 0.8297  0.6s
Epoch 30/50  Train 61.8897  Val 0.8285  0.6s
Epoch 31/50  Train 61.7227  Val 0.8273  0.6s
Epoch 32/50  Train 61.5620  Val 0.8262  0.7s
Epoch 33/50  Train 61.4069  Val 0.8252  0.6s
Epoch 34/50  Train 61.2570  Val 0.8242  0.7s
Epoch 35/50  Train 61.1118  Val 0.8232  0.7s
Epoch 36/50  Train 60.9709  Val 0.8223  0.7s
Epoch 37/50  Train 60.8339  Val 0.8215  0.7s
Epoch 38/50  Train 60.7006  Val 0.8207  0.7s
Epoch 39/50  Train 60.5705  Val 0.8199  0.7s
Epoch 40/50  Train 60.4435  Val 0.8191  0.7s
Epoch 41/50  Train 60.3194  Val 0.8184  0.6s
Epoch 42/50  Train 60.1978  Val 0.8177  0.7s
Epoch 43/50  Train 60.0787  Val 0.8171  0.7s
Epoch 44/50  Train 59.9618  Val 0.8164  0.6s
Epoch 45/50  Train 59.8469  Val 0.8158  0.7s
Epoch 46/50  Train 59.7340  Val 0.8152  0.7s
Epoch 47/50  Train 59.6229  Val 0.8147  0.7s
Epoch 48/50  Train 59.5134  Val 0.8141  0.7s
Epoch 49/50  Train 59.4055  Val 0.8136  0.7s
Epoch 50/50  Train 59.2990  Val 0.8131  0.6s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.47903853
1 day(s) RMSE                      : 0.76553197
1 day(s) R2                        : -0.36466540
1 day(s) Pearson r                 : 0.26455793
1 day(s) QLIKE                     : 0.50596971
3 day(s) MAE                       : 0.47875015
3 day(s) RMSE                      : 0.76667549
3 day(s) R2                        : -0.36760247
3 day(s) Pearson r                 : 0.26202412
3 day(s) QLIKE                     : 0.50201074
5 day(s) MAE                       : 0.48050772
5 day(s) RMSE                      : 0.76915910
5 day(s) R2                        : -0.37256129
5 day(s) Pearson r                 : 0.25031910
5 day(s) QLIKE                     : 0.50532984
full horizon MAE                   : 0.48050772
full horizon RMSE                  : 0.76915910
full horizon R2                    : -0.37256129
full horizon Pearson r             : 0.25031910
full horizon QLIKE                 : 0.50532984

--- Task 2 ---
1 day(s) MAE                       : 0.32668954
1 day(s) RMSE                      : 0.61977874
1 day(s) R2                        : -0.38415377
1 day(s) Pearson r                 : -0.02965635
1 day(s) QLIKE                     : 18.53727944
3 day(s) MAE                       : 0.32657770
3 day(s) RMSE                      : 0.61967998
3 day(s) R2                        : -0.38384500
3 day(s) Pearson r                 : 0.01159551
3 day(s) QLIKE                     : 18.70344641
5 day(s) MAE                       : 0.32786677
5 day(s) RMSE                      : 0.62225947
5 day(s) R2                        : -0.38382550
5 day(s) Pearson r                 : 0.00988120
5 day(s) QLIKE                     : 18.31819188
full horizon MAE                   : 0.32786677
full horizon RMSE                  : 0.62225947
full horizon R2                    : -0.38382550
full horizon Pearson r             : 0.00988120
full horizon QLIKE                 : 18.31819188

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/ITransformer_H5.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0897933, max=1.17833

=== GOLD | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 5534
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.638767766110508
  Min value:  -9.898129597066724
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.285154362272003
  Min value:  -5.254244390683287
Checking X_price_val:
Shape: (443, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -6.160721883827869
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7918153902171274
  Min value:  -2.1209816396054144
Checking X_price_test:
Shape: (1107, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.182160059445359
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5419191122017313
  Min value:  -2.362369600030494
Epoch 1/50  Train 82.7117  Val 0.9736  0.8s
Epoch 2/50  Train 80.9526  Val 0.9583  0.7s
Epoch 3/50  Train 79.2458  Val 0.9446  0.6s
Epoch 4/50  Train 77.6769  Val 0.9325  0.7s
Epoch 5/50  Train 76.2639  Val 0.9219  0.7s
Epoch 6/50  Train 74.9982  Val 0.9124  0.7s
Epoch 7/50  Train 73.8655  Val 0.9041  0.6s
Epoch 8/50  Train 72.8511  Val 0.8967  0.7s
Epoch 9/50  Train 71.9410  Val 0.8900  0.9s
Epoch 10/50  Train 71.1227  Val 0.8841  0.7s
Epoch 11/50  Train 70.3850  Val 0.8787  0.8s
Epoch 12/50  Train 69.7181  Val 0.8738  0.7s
Epoch 13/50  Train 69.1135  Val 0.8695  0.6s
Epoch 14/50  Train 68.5637  Val 0.8654  0.6s
Epoch 15/50  Train 68.0621  Val 0.8618  0.6s
Epoch 16/50  Train 67.6030  Val 0.8584  0.6s
Epoch 17/50  Train 67.1817  Val 0.8553  0.6s
Epoch 18/50  Train 66.7937  Val 0.8525  0.6s
Epoch 19/50  Train 66.4353  Val 0.8498  0.6s
Epoch 20/50  Train 66.1033  Val 0.8473  0.6s
Epoch 21/50  Train 65.7949  Val 0.8450  0.7s
Epoch 22/50  Train 65.5075  Val 0.8429  0.7s
Epoch 23/50  Train 65.2391  Val 0.8409  0.6s
Epoch 24/50  Train 64.9875  Val 0.8390  0.7s
Epoch 25/50  Train 64.7513  Val 0.8373  0.7s
Epoch 26/50  Train 64.5289  Val 0.8356  0.7s
Epoch 27/50  Train 64.3189  Val 0.8341  0.7s
Epoch 28/50  Train 64.1202  Val 0.8326  0.7s
Epoch 29/50  Train 63.9318  Val 0.8313  0.7s
Epoch 30/50  Train 63.7527  Val 0.8300  0.7s
Epoch 31/50  Train 63.5821  Val 0.8288  0.7s
Epoch 32/50  Train 63.4193  Val 0.8276  0.7s
Epoch 33/50  Train 63.2635  Val 0.8265  0.7s
Epoch 34/50  Train 63.1142  Val 0.8255  0.7s
Epoch 35/50  Train 62.9708  Val 0.8246  0.6s
Epoch 36/50  Train 62.8329  Val 0.8236  0.6s
Epoch 37/50  Train 62.6999  Val 0.8228  0.7s
Epoch 38/50  Train 62.5716  Val 0.8220  0.6s
Epoch 39/50  Train 62.4474  Val 0.8212  0.7s
Epoch 40/50  Train 62.3271  Val 0.8205  0.7s
Epoch 41/50  Train 62.2103  Val 0.8198  0.7s
Epoch 42/50  Train 62.0969  Val 0.8191  0.6s
Epoch 43/50  Train 61.9864  Val 0.8185  0.7s
Epoch 44/50  Train 61.8788  Val 0.8179  0.6s
Epoch 45/50  Train 61.7737  Val 0.8173  0.6s
Epoch 46/50  Train 61.6710  Val 0.8167  0.7s
Epoch 47/50  Train 61.5706  Val 0.8162  0.7s
Epoch 48/50  Train 61.4721  Val 0.8157  0.7s
Epoch 49/50  Train 61.3755  Val 0.8152  0.7s
Epoch 50/50  Train 61.2807  Val 0.8148  0.6s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.48273984
1 day(s) RMSE                      : 0.76824331
1 day(s) R2                        : -0.37434920
1 day(s) Pearson r                 : 0.23311930
1 day(s) QLIKE                     : 0.51466694
3 day(s) MAE                       : 0.48156643
3 day(s) RMSE                      : 0.76896014
3 day(s) R2                        : -0.37576538
3 day(s) Pearson r                 : 0.24426101
3 day(s) QLIKE                     : 0.50747199
5 day(s) MAE                       : 0.48392906
5 day(s) RMSE                      : 0.77218507
5 day(s) R2                        : -0.38338219
5 day(s) Pearson r                 : 0.23003641
5 day(s) QLIKE                     : 0.51119606
10 day(s) MAE                      : 0.48671746
10 day(s) RMSE                     : 0.77803664
10 day(s) R2                       : -0.38876230
10 day(s) Pearson r                : 0.20796057
10 day(s) QLIKE                    : 0.51971154
full horizon MAE                   : 0.48671746
full horizon RMSE                  : 0.77803664
full horizon R2                    : -0.38876230
full horizon Pearson r             : 0.20796057
full horizon QLIKE                 : 0.51971154

--- Task 2 ---
1 day(s) MAE                       : 0.32659310
1 day(s) RMSE                      : 0.61976655
1 day(s) R2                        : -0.38409934
1 day(s) Pearson r                 : -0.02792015
1 day(s) QLIKE                     : 19.53967183
3 day(s) MAE                       : 0.32660520
3 day(s) RMSE                      : 0.61975809
3 day(s) R2                        : -0.38419390
3 day(s) Pearson r                 : -0.00317298
3 day(s) QLIKE                     : 18.63991057
5 day(s) MAE                       : 0.32788277
5 day(s) RMSE                      : 0.62230439
5 day(s) R2                        : -0.38402530
5 day(s) Pearson r                 : 0.00482389
5 day(s) QLIKE                     : 18.19305753
10 day(s) MAE                      : 0.32833387
10 day(s) RMSE                     : 0.62372469
10 day(s) R2                       : -0.38297029
10 day(s) Pearson r                : 0.02230478
10 day(s) QLIKE                    : 18.37036808
full horizon MAE                   : 0.32833387
full horizon RMSE                  : 0.62372469
full horizon R2                    : -0.38297029
full horizon Pearson r             : 0.02230478
full horizon QLIKE                 : 18.37036808

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/ITransformer_H10.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0863574, max=1.02075

=== GOLD | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 5534
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.638767766110508
  Min value:  -9.898129597066724
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.287022865369746
  Min value:  -5.257736685381158
Checking X_price_val:
Shape: (443, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -6.160721883827869
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7933331106383543
  Min value:  -2.122246065928174
Checking X_price_test:
Shape: (1107, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.182160059445359
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5432591469134986
  Min value:  -2.363805662326961
Epoch 1/50  Train 85.5811  Val 1.0068  0.7s
Epoch 2/50  Train 84.0044  Val 0.9919  0.7s
Epoch 3/50  Train 82.4401  Val 0.9782  0.6s
Epoch 4/50  Train 80.9704  Val 0.9659  0.6s
Epoch 5/50  Train 79.6161  Val 0.9548  0.6s
Epoch 6/50  Train 78.3748  Val 0.9448  0.6s
Epoch 7/50  Train 77.2387  Val 0.9358  0.6s
Epoch 8/50  Train 76.1987  Val 0.9276  0.6s
Epoch 9/50  Train 75.2457  Val 0.9202  0.8s
Epoch 10/50  Train 74.3711  Val 0.9133  0.8s
Epoch 11/50  Train 73.5674  Val 0.9071  0.7s
Epoch 12/50  Train 72.8275  Val 0.9013  0.8s
Epoch 13/50  Train 72.1453  Val 0.8959  0.6s
Epoch 14/50  Train 71.5155  Val 0.8909  0.6s
Epoch 15/50  Train 70.9332  Val 0.8862  0.7s
Epoch 16/50  Train 70.3942  Val 0.8818  0.6s
Epoch 17/50  Train 69.8948  Val 0.8777  0.6s
Epoch 18/50  Train 69.4316  Val 0.8738  0.7s
Epoch 19/50  Train 69.0018  Val 0.8701  0.7s
Epoch 20/50  Train 68.6027  Val 0.8666  0.6s
Epoch 21/50  Train 68.2319  Val 0.8633  0.6s
Epoch 22/50  Train 67.8873  Val 0.8602  0.6s
Epoch 23/50  Train 67.5670  Val 0.8573  0.7s
Epoch 24/50  Train 67.2693  Val 0.8545  0.6s
Epoch 25/50  Train 66.9925  Val 0.8518  0.6s
Epoch 26/50  Train 66.7351  Val 0.8493  0.7s
Epoch 27/50  Train 66.4958  Val 0.8470  0.6s
Epoch 28/50  Train 66.2732  Val 0.8447  0.6s
Epoch 29/50  Train 66.0662  Val 0.8426  0.6s
Epoch 30/50  Train 65.8734  Val 0.8407  0.6s
Epoch 31/50  Train 65.6939  Val 0.8388  0.7s
Epoch 32/50  Train 65.5266  Val 0.8371  0.6s
Epoch 33/50  Train 65.3704  Val 0.8355  0.7s
Epoch 34/50  Train 65.2246  Val 0.8339  0.7s
Epoch 35/50  Train 65.0881  Val 0.8325  0.7s
Epoch 36/50  Train 64.9601  Val 0.8312  0.6s
Epoch 37/50  Train 64.8399  Val 0.8300  0.6s
Epoch 38/50  Train 64.7267  Val 0.8289  0.7s
Epoch 39/50  Train 64.6198  Val 0.8278  0.6s
Epoch 40/50  Train 64.5187  Val 0.8269  0.6s
Epoch 41/50  Train 64.4228  Val 0.8260  0.7s
Epoch 42/50  Train 64.3315  Val 0.8252  0.7s
Epoch 43/50  Train 64.2444  Val 0.8244  0.7s
Epoch 44/50  Train 64.1610  Val 0.8237  0.7s
Epoch 45/50  Train 64.0809  Val 0.8231  0.7s
Epoch 46/50  Train 64.0039  Val 0.8225  0.6s
Epoch 47/50  Train 63.9295  Val 0.8220  0.7s
Epoch 48/50  Train 63.8574  Val 0.8215  0.6s
Epoch 49/50  Train 63.7875  Val 0.8210  0.7s
Epoch 50/50  Train 63.7195  Val 0.8206  0.7s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.48883442
1 day(s) RMSE                      : 0.77494206
1 day(s) R2                        : -0.39842114
1 day(s) Pearson r                 : 0.18486377
1 day(s) QLIKE                     : 0.52188202
3 day(s) MAE                       : 0.48665862
3 day(s) RMSE                      : 0.77388099
3 day(s) R2                        : -0.39342975
3 day(s) Pearson r                 : 0.22077202
3 day(s) QLIKE                     : 0.51202520
5 day(s) MAE                       : 0.48832853
5 day(s) RMSE                      : 0.77634676
5 day(s) R2                        : -0.39833386
5 day(s) Pearson r                 : 0.20528554
5 day(s) QLIKE                     : 0.51617973
10 day(s) MAE                      : 0.49154218
10 day(s) RMSE                     : 0.78257928
10 day(s) R2                       : -0.40502648
10 day(s) Pearson r                : 0.17813990
10 day(s) QLIKE                    : 0.52541978
20 day(s) MAE                      : 0.49832608
20 day(s) RMSE                     : 0.79234265
20 day(s) R2                       : -0.41904473
20 day(s) Pearson r                : 0.12970564
20 day(s) QLIKE                    : 0.53874714
full horizon MAE                   : 0.49832608
full horizon RMSE                  : 0.79234265
full horizon R2                    : -0.41904473
full horizon Pearson r             : 0.12970564
full horizon QLIKE                 : 0.53874714

--- Task 2 ---
1 day(s) MAE                       : 0.32651673
1 day(s) RMSE                      : 0.61977992
1 day(s) R2                        : -0.38415902
1 day(s) Pearson r                 : -0.01403945
1 day(s) QLIKE                     : 17.46685909
3 day(s) MAE                       : 0.32660005
3 day(s) RMSE                      : 0.61979392
3 day(s) R2                        : -0.38435394
3 day(s) Pearson r                 : -0.00967951
3 day(s) QLIKE                     : 16.65282808
5 day(s) MAE                       : 0.32788889
5 day(s) RMSE                      : 0.62234032
5 day(s) R2                        : -0.38418515
5 day(s) Pearson r                 : -0.00689891
5 day(s) QLIKE                     : 16.37985976
10 day(s) MAE                      : 0.32835496
10 day(s) RMSE                     : 0.62378425
10 day(s) R2                       : -0.38323441
10 day(s) Pearson r                : 0.00031534
10 day(s) QLIKE                    : 16.33060003
20 day(s) MAE                      : 0.32935175
20 day(s) RMSE                     : 0.62600993
20 day(s) R2                       : -0.38266394
20 day(s) Pearson r                : 0.01162598
20 day(s) QLIKE                    : 16.44055496
full horizon MAE                   : 0.32935175
full horizon RMSE                  : 0.62600993
full horizon R2                    : -0.38266394
full horizon Pearson r             : 0.01162598
full horizon QLIKE                 : 16.44055496

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/GOLD/ITransformer_H20.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0802805, max=0.910797

=== SP500 | H=1 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3466
Time steps for y: 1
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -11.560339011034328
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5981030806998047
  Min value:  -18.186962127218408
Checking X_price_val:
Shape: (277, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.583786604265051
  Min value:  -4.793129353735297
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3850009093345803
  Min value:  -2.4181466917899535
Checking X_price_test:
Shape: (694, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -6.142887457561203
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 1, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3795163284544945
  Min value:  -3.2441546953028637
Epoch 1/50  Train 45.1144  Val 1.2804  0.5s
Epoch 2/50  Train 43.7581  Val 1.2542  0.4s
Epoch 3/50  Train 42.7944  Val 1.2323  0.4s
Epoch 4/50  Train 41.9874  Val 1.2135  0.4s
Epoch 5/50  Train 41.2992  Val 1.1972  0.4s
Epoch 6/50  Train 40.7050  Val 1.1830  0.4s
Epoch 7/50  Train 40.1870  Val 1.1706  0.4s
Epoch 8/50  Train 39.7315  Val 1.1596  0.4s
Epoch 9/50  Train 39.3282  Val 1.1500  0.4s
Epoch 10/50  Train 38.9687  Val 1.1414  0.4s
Epoch 11/50  Train 38.6463  Val 1.1339  0.4s
Epoch 12/50  Train 38.3556  Val 1.1271  0.4s
Epoch 13/50  Train 38.0921  Val 1.1212  0.4s
Epoch 14/50  Train 37.8521  Val 1.1158  0.4s
Epoch 15/50  Train 37.6325  Val 1.1111  0.4s
Epoch 16/50  Train 37.4307  Val 1.1068  0.4s
Epoch 17/50  Train 37.2446  Val 1.1030  0.4s
Epoch 18/50  Train 37.0722  Val 1.0996  0.4s
Epoch 19/50  Train 36.9120  Val 1.0966  0.4s
Epoch 20/50  Train 36.7627  Val 1.0938  0.5s
Epoch 21/50  Train 36.6229  Val 1.0913  0.5s
Epoch 22/50  Train 36.4918  Val 1.0891  0.4s
Epoch 23/50  Train 36.3684  Val 1.0871  0.5s
Epoch 24/50  Train 36.2520  Val 1.0852  0.4s
Epoch 25/50  Train 36.1419  Val 1.0836  0.5s
Epoch 26/50  Train 36.0374  Val 1.0821  0.5s
Epoch 27/50  Train 35.9382  Val 1.0807  0.4s
Epoch 28/50  Train 35.8437  Val 1.0794  0.4s
Epoch 29/50  Train 35.7535  Val 1.0783  0.4s
Epoch 30/50  Train 35.6672  Val 1.0773  0.4s
Epoch 31/50  Train 35.5845  Val 1.0763  0.4s
Epoch 32/50  Train 35.5052  Val 1.0755  0.4s
Epoch 33/50  Train 35.4290  Val 1.0747  0.4s
Epoch 34/50  Train 35.3555  Val 1.0739  0.4s
Epoch 35/50  Train 35.2847  Val 1.0733  0.4s
Epoch 36/50  Train 35.2162  Val 1.0727  0.4s
Epoch 37/50  Train 35.1500  Val 1.0721  0.4s
Epoch 38/50  Train 35.0859  Val 1.0716  0.4s
Epoch 39/50  Train 35.0236  Val 1.0711  0.4s
Epoch 40/50  Train 34.9632  Val 1.0706  0.4s
Epoch 41/50  Train 34.9043  Val 1.0702  0.4s
Epoch 42/50  Train 34.8470  Val 1.0698  0.4s
Epoch 43/50  Train 34.7910  Val 1.0695  0.4s
Epoch 44/50  Train 34.7364  Val 1.0691  0.4s
Epoch 45/50  Train 34.6830  Val 1.0688  0.4s
Epoch 46/50  Train 34.6307  Val 1.0685  0.4s
Epoch 47/50  Train 34.5794  Val 1.0682  0.4s
Epoch 48/50  Train 34.5290  Val 1.0680  0.4s
Epoch 49/50  Train 34.4796  Val 1.0677  0.4s
Epoch 50/50  Train 34.4309  Val 1.0675  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 1
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.61160013
1 day(s) RMSE                      : 2.65623362
1 day(s) R2                        : 0.12007411
1 day(s) Pearson r                 : 0.38617106
1 day(s) QLIKE                     : 0.70871706
full horizon MAE                   : 0.61160013
full horizon RMSE                  : 2.65623362
full horizon R2                    : 0.12007411
full horizon Pearson r             : 0.38617106
full horizon QLIKE                 : 0.70871706

--- Task 2 ---
1 day(s) MAE                       : 0.36389051
1 day(s) RMSE                      : 1.47529254
1 day(s) R2                        : -4.45668995
1 day(s) Pearson r                 : -0.01896530
1 day(s) QLIKE                     : 20.42307283
full horizon MAE                   : 0.36389051
full horizon RMSE                  : 1.47529254
full horizon R2                    : -4.45668995
full horizon Pearson r             : -0.01896530
full horizon QLIKE                 : 20.42307283

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/ITransformer_H1.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.250041, max=14.2538

=== SP500 | H=5 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3466
Time steps for y: 5
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -11.560339011034328
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5969074867518915
  Min value:  -18.18009964581938
Checking X_price_val:
Shape: (277, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.583786604265051
  Min value:  -4.793129353735297
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384254029613919
  Min value:  -2.417116935778614
Checking X_price_test:
Shape: (694, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -6.142887457561203
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 5, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3784015875360596
  Min value:  -3.2428194072829686
Epoch 1/50  Train 47.6498  Val 1.3128  0.5s
Epoch 2/50  Train 46.7132  Val 1.2950  0.4s
Epoch 3/50  Train 45.9787  Val 1.2795  0.4s
Epoch 4/50  Train 45.3150  Val 1.2656  0.4s
Epoch 5/50  Train 44.7104  Val 1.2531  0.4s
Epoch 6/50  Train 44.1572  Val 1.2417  0.4s
Epoch 7/50  Train 43.6498  Val 1.2314  0.4s
Epoch 8/50  Train 43.1831  Val 1.2220  0.4s
Epoch 9/50  Train 42.7531  Val 1.2134  0.4s
Epoch 10/50  Train 42.3562  Val 1.2055  0.4s
Epoch 11/50  Train 41.9892  Val 1.1982  0.4s
Epoch 12/50  Train 41.6493  Val 1.1915  0.4s
Epoch 13/50  Train 41.3341  Val 1.1853  0.5s
Epoch 14/50  Train 41.0413  Val 1.1796  0.5s
Epoch 15/50  Train 40.7689  Val 1.1743  0.5s
Epoch 16/50  Train 40.5151  Val 1.1694  0.5s
Epoch 17/50  Train 40.2783  Val 1.1648  0.5s
Epoch 18/50  Train 40.0569  Val 1.1605  0.5s
Epoch 19/50  Train 39.8496  Val 1.1566  0.4s
Epoch 20/50  Train 39.6553  Val 1.1529  0.4s
Epoch 21/50  Train 39.4727  Val 1.1494  0.4s
Epoch 22/50  Train 39.3010  Val 1.1461  0.4s
Epoch 23/50  Train 39.1392  Val 1.1430  0.4s
Epoch 24/50  Train 38.9864  Val 1.1402  0.4s
Epoch 25/50  Train 38.8420  Val 1.1374  0.4s
Epoch 26/50  Train 38.7053  Val 1.1349  0.4s
Epoch 27/50  Train 38.5756  Val 1.1324  0.4s
Epoch 28/50  Train 38.4524  Val 1.1301  0.4s
Epoch 29/50  Train 38.3352  Val 1.1280  0.4s
Epoch 30/50  Train 38.2235  Val 1.1259  0.4s
Epoch 31/50  Train 38.1170  Val 1.1240  0.4s
Epoch 32/50  Train 38.0152  Val 1.1221  0.4s
Epoch 33/50  Train 37.9177  Val 1.1203  0.4s
Epoch 34/50  Train 37.8244  Val 1.1186  0.4s
Epoch 35/50  Train 37.7348  Val 1.1170  0.4s
Epoch 36/50  Train 37.6488  Val 1.1155  0.4s
Epoch 37/50  Train 37.5661  Val 1.1140  0.4s
Epoch 38/50  Train 37.4864  Val 1.1127  0.4s
Epoch 39/50  Train 37.4097  Val 1.1113  0.4s
Epoch 40/50  Train 37.3356  Val 1.1101  0.4s
Epoch 41/50  Train 37.2640  Val 1.1088  0.4s
Epoch 42/50  Train 37.1948  Val 1.1077  0.4s
Epoch 43/50  Train 37.1279  Val 1.1066  0.4s
Epoch 44/50  Train 37.0630  Val 1.1055  0.4s
Epoch 45/50  Train 37.0002  Val 1.1045  0.4s
Epoch 46/50  Train 36.9392  Val 1.1035  0.4s
Epoch 47/50  Train 36.8799  Val 1.1026  0.4s
Epoch 48/50  Train 36.8223  Val 1.1017  0.4s
Epoch 49/50  Train 36.7663  Val 1.1008  0.4s
Epoch 50/50  Train 36.7118  Val 1.1000  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 5
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.62554621
1 day(s) RMSE                      : 2.74048685
1 day(s) R2                        : 0.06336798
1 day(s) Pearson r                 : 0.30744566
1 day(s) QLIKE                     : 0.73823164
3 day(s) MAE                       : 0.63422960
3 day(s) RMSE                      : 2.78937172
3 day(s) R2                        : 0.02970153
3 day(s) Pearson r                 : 0.23340032
3 day(s) QLIKE                     : 0.77870776
5 day(s) MAE                       : 0.64390517
5 day(s) RMSE                      : 2.82267855
5 day(s) R2                        : 0.00644816
5 day(s) Pearson r                 : 0.18132912
5 day(s) QLIKE                     : 0.82592934
full horizon MAE                   : 0.64390517
full horizon RMSE                  : 2.82267855
full horizon R2                    : 0.00644816
full horizon Pearson r             : 0.18132912
full horizon QLIKE                 : 0.82592934

--- Task 2 ---
1 day(s) MAE                       : 0.45966821
1 day(s) RMSE                      : 3.88614302
1 day(s) R2                        : -36.86260990
1 day(s) Pearson r                 : -0.01892179
1 day(s) QLIKE                     : 21.05932861
3 day(s) MAE                       : 7.93635391
3 day(s) RMSE                      : 345.12490215
3 day(s) R2                        : -297234.14250704
3 day(s) Pearson r                 : 0.01003929
3 day(s) QLIKE                     : 21.36841122
5 day(s) MAE                       : 4.88857797
5 day(s) RMSE                      : 267.33297503
5 day(s) R2                        : -178167.28013541
5 day(s) Pearson r                 : 0.00776420
5 day(s) QLIKE                     : 21.32332631
full horizon MAE                   : 4.88857797
full horizon RMSE                  : 267.33297503
full horizon R2                    : -178167.28013541
full horizon Pearson r             : 0.00776420
full horizon QLIKE                 : 21.32332631

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/ITransformer_H5.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.241409, max=15.5996

=== SP500 | H=10 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3466
Time steps for y: 10
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -11.560339011034328
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5969471269414797
  Min value:  -18.183354534705547
Checking X_price_val:
Shape: (277, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.583786604265051
  Min value:  -4.793129353735297
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3841102138286323
  Min value:  -2.4179871257691663
Checking X_price_test:
Shape: (694, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -6.142887457561203
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 10, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.378408171115352
  Min value:  -3.2438145134653436
Epoch 1/50  Train 48.2602  Val 1.3476  0.5s
Epoch 2/50  Train 47.5359  Val 1.3326  0.4s
Epoch 3/50  Train 46.9548  Val 1.3192  0.4s
Epoch 4/50  Train 46.4170  Val 1.3068  0.4s
Epoch 5/50  Train 45.9158  Val 1.2954  0.4s
Epoch 6/50  Train 45.4473  Val 1.2848  0.4s
Epoch 7/50  Train 45.0086  Val 1.2750  0.5s
Epoch 8/50  Train 44.5970  Val 1.2657  0.4s
Epoch 9/50  Train 44.2104  Val 1.2571  0.4s
Epoch 10/50  Train 43.8468  Val 1.2490  0.5s
Epoch 11/50  Train 43.5047  Val 1.2414  0.4s
Epoch 12/50  Train 43.1823  Val 1.2342  0.4s
Epoch 13/50  Train 42.8784  Val 1.2274  0.5s
Epoch 14/50  Train 42.5916  Val 1.2211  0.5s
Epoch 15/50  Train 42.3207  Val 1.2150  0.4s
Epoch 16/50  Train 42.0647  Val 1.2093  0.5s
Epoch 17/50  Train 41.8226  Val 1.2039  0.5s
Epoch 18/50  Train 41.5934  Val 1.1988  0.5s
Epoch 19/50  Train 41.3763  Val 1.1940  0.4s
Epoch 20/50  Train 41.1705  Val 1.1894  0.4s
Epoch 21/50  Train 40.9752  Val 1.1850  0.4s
Epoch 22/50  Train 40.7899  Val 1.1808  0.4s
Epoch 23/50  Train 40.6137  Val 1.1768  0.4s
Epoch 24/50  Train 40.4463  Val 1.1730  0.4s
Epoch 25/50  Train 40.2869  Val 1.1694  0.4s
Epoch 26/50  Train 40.1351  Val 1.1660  0.4s
Epoch 27/50  Train 39.9904  Val 1.1627  0.4s
Epoch 28/50  Train 39.8524  Val 1.1595  0.4s
Epoch 29/50  Train 39.7206  Val 1.1565  0.4s
Epoch 30/50  Train 39.5947  Val 1.1536  0.4s
Epoch 31/50  Train 39.4743  Val 1.1508  0.4s
Epoch 32/50  Train 39.3591  Val 1.1482  0.4s
Epoch 33/50  Train 39.2487  Val 1.1456  0.4s
Epoch 34/50  Train 39.1429  Val 1.1432  0.4s
Epoch 35/50  Train 39.0414  Val 1.1408  0.4s
Epoch 36/50  Train 38.9440  Val 1.1386  0.4s
Epoch 37/50  Train 38.8504  Val 1.1364  0.4s
Epoch 38/50  Train 38.7603  Val 1.1343  0.4s
Epoch 39/50  Train 38.6737  Val 1.1323  0.4s
Epoch 40/50  Train 38.5903  Val 1.1304  0.4s
Epoch 41/50  Train 38.5098  Val 1.1286  0.4s
Epoch 42/50  Train 38.4323  Val 1.1268  0.4s
Epoch 43/50  Train 38.3574  Val 1.1251  0.4s
Epoch 44/50  Train 38.2851  Val 1.1234  0.4s
Epoch 45/50  Train 38.2152  Val 1.1218  0.4s
Epoch 46/50  Train 38.1476  Val 1.1203  0.4s
Epoch 47/50  Train 38.0821  Val 1.1189  0.4s
Epoch 48/50  Train 38.0187  Val 1.1174  0.4s
Epoch 49/50  Train 37.9573  Val 1.1161  0.4s
Epoch 50/50  Train 37.8977  Val 1.1148  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 10
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.62029807
1 day(s) RMSE                      : 2.74175011
1 day(s) R2                        : 0.06250428
1 day(s) Pearson r                 : 0.31278666
1 day(s) QLIKE                     : 0.74311908
3 day(s) MAE                       : 0.63595289
3 day(s) RMSE                      : 2.80776236
3 day(s) R2                        : 0.01686478
3 day(s) Pearson r                 : 0.20567995
3 day(s) QLIKE                     : 0.79316359
5 day(s) MAE                       : 0.64571023
5 day(s) RMSE                      : 2.83464102
5 day(s) R2                        : -0.00199100
5 day(s) Pearson r                 : 0.15966235
5 day(s) QLIKE                     : 0.84332178
10 day(s) MAE                      : 0.65503985
10 day(s) RMSE                     : 2.85955321
10 day(s) R2                       : -0.01954228
10 day(s) Pearson r                : 0.11230385
10 day(s) QLIKE                    : 0.89542495
full horizon MAE                   : 0.65503985
full horizon RMSE                  : 2.85955321
full horizon R2                    : -0.01954228
full horizon Pearson r             : 0.11230385
full horizon QLIKE                 : 0.89542495

--- Task 2 ---
1 day(s) MAE                       : 0.32060680
1 day(s) RMSE                      : 0.72309081
1 day(s) R2                        : -0.31086713
1 day(s) Pearson r                 : -0.01887645
1 day(s) QLIKE                     : 22.73184950
3 day(s) MAE                       : 82.95266698
3 day(s) RMSE                      : 3764.83106388
3 day(s) R2                        : -35370283.15071109
3 day(s) Pearson r                 : 0.01010470
3 day(s) QLIKE                     : 22.29449948
5 day(s) MAE                       : 49.89843438
5 day(s) RMSE                      : 2916.22563671
5 day(s) R2                        : -21201547.02119072
5 day(s) Pearson r                 : 0.00781455
5 day(s) QLIKE                     : 21.95571357
10 day(s) MAE                      : 25.13548457
10 day(s) RMSE                     : 2062.08422510
10 day(s) R2                       : -10600244.92215646
10 day(s) Pearson r                : 0.00554911
10 day(s) QLIKE                    : 21.31745771
full horizon MAE                   : 25.13548457
full horizon RMSE                  : 2062.08422510
full horizon R2                    : -10600244.92215646
full horizon Pearson r             : 0.00554911
full horizon QLIKE                 : 21.31745771

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/ITransformer_H10.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.214759, max=15.9192

=== SP500 | H=20 | ITransformer (simple fit) | no_tasks=2 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
[ITransformerWrapper] Using device: mps
Batch size for y: 3466
Time steps for y: 20
Features for y: 2

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -11.560339011034328
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5963112311058953
  Min value:  -18.187442810157968
Checking X_price_val:
Shape: (277, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.583786604265051
  Min value:  -4.793129353735297
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3832820721048864
  Min value:  -2.4468344131842463
Checking X_price_test:
Shape: (694, 60, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -6.142887457561203
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 20, 2)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3777376348304156
  Min value:  -3.2455347334034057
Epoch 1/50  Train 49.9810  Val 1.4030  0.5s
Epoch 2/50  Train 49.3372  Val 1.3887  0.4s
Epoch 3/50  Train 48.8166  Val 1.3757  0.4s
Epoch 4/50  Train 48.3292  Val 1.3635  0.4s
Epoch 5/50  Train 47.8697  Val 1.3521  0.4s
Epoch 6/50  Train 47.4350  Val 1.3414  0.4s
Epoch 7/50  Train 47.0229  Val 1.3312  0.4s
Epoch 8/50  Train 46.6315  Val 1.3216  0.4s
Epoch 9/50  Train 46.2592  Val 1.3125  0.4s
Epoch 10/50  Train 45.9047  Val 1.3039  0.4s
Epoch 11/50  Train 45.5667  Val 1.2956  0.4s
Epoch 12/50  Train 45.2441  Val 1.2878  0.4s
Epoch 13/50  Train 44.9358  Val 1.2802  0.4s
Epoch 14/50  Train 44.6410  Val 1.2730  0.5s
Epoch 15/50  Train 44.3589  Val 1.2661  0.4s
Epoch 16/50  Train 44.0885  Val 1.2595  0.4s
Epoch 17/50  Train 43.8294  Val 1.2531  0.5s
Epoch 18/50  Train 43.5809  Val 1.2470  0.5s
Epoch 19/50  Train 43.3423  Val 1.2411  0.4s
Epoch 20/50  Train 43.1132  Val 1.2354  0.4s
Epoch 21/50  Train 42.8931  Val 1.2299  0.4s
Epoch 22/50  Train 42.6816  Val 1.2245  0.4s
Epoch 23/50  Train 42.4782  Val 1.2194  0.4s
Epoch 24/50  Train 42.2827  Val 1.2144  0.4s
Epoch 25/50  Train 42.0947  Val 1.2096  0.4s
Epoch 26/50  Train 41.9139  Val 1.2049  0.4s
Epoch 27/50  Train 41.7400  Val 1.2003  0.4s
Epoch 28/50  Train 41.5728  Val 1.1959  0.4s
Epoch 29/50  Train 41.4120  Val 1.1917  0.4s
Epoch 30/50  Train 41.2574  Val 1.1875  0.4s
Epoch 31/50  Train 41.1089  Val 1.1835  0.4s
Epoch 32/50  Train 40.9661  Val 1.1796  0.4s
Epoch 33/50  Train 40.8290  Val 1.1758  0.4s
Epoch 34/50  Train 40.6974  Val 1.1722  0.4s
Epoch 35/50  Train 40.5710  Val 1.1686  0.4s
Epoch 36/50  Train 40.4498  Val 1.1652  0.4s
Epoch 37/50  Train 40.3335  Val 1.1618  0.4s
Epoch 38/50  Train 40.2221  Val 1.1586  0.4s
Epoch 39/50  Train 40.1153  Val 1.1555  0.4s
Epoch 40/50  Train 40.0131  Val 1.1524  0.4s
Epoch 41/50  Train 39.9152  Val 1.1495  0.4s
Epoch 42/50  Train 39.8215  Val 1.1467  0.4s
Epoch 43/50  Train 39.7320  Val 1.1440  0.4s
Epoch 44/50  Train 39.6463  Val 1.1414  0.4s
Epoch 45/50  Train 39.5644  Val 1.1388  0.4s
Epoch 46/50  Train 39.4861  Val 1.1364  0.4s
Epoch 47/50  Train 39.4113  Val 1.1341  0.4s
Epoch 48/50  Train 39.3398  Val 1.1318  0.4s
Epoch 49/50  Train 39.2714  Val 1.1297  0.4s
Epoch 50/50  Train 39.2061  Val 1.1276  0.4s

Parameters used in the single-fit model:
input_len: 60
output_len: 20
input_dim: 2
output_dim: 2
no_tasks: 2
device: mps
epochs: 50
batch_size: 64
verbose: True
lr: 0.00001000
checkpoint_path: None
dropout: 0.00000000
l2_weight: 0.00001000
patience: 10
min_epochs: 30
min_delta: 0.00010000
d_model: 64
d_ff: 256
n_heads: 4
e_layers: 4
attn: prob
activation: gelu
output_attention: False
distil: True
embed: timeF
freq: d
features: MS
moving_avg: 25
factor: 3
task_name: long_term_forecast
hidden_dim: None
hidden_layers: None
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.62293285
1 day(s) RMSE                      : 2.78140908
1 day(s) R2                        : 0.03518668
1 day(s) Pearson r                 : 0.31395607
1 day(s) QLIKE                     : 0.77024815
3 day(s) MAE                       : 0.64255838
3 day(s) RMSE                      : 2.85562896
3 day(s) R2                        : -0.01694184
3 day(s) Pearson r                 : 0.13443186
3 day(s) QLIKE                     : 0.81615630
5 day(s) MAE                       : 0.64870067
5 day(s) RMSE                      : 2.86412968
5 day(s) R2                        : -0.02294678
5 day(s) Pearson r                 : 0.11027048
5 day(s) QLIKE                     : 0.86353055
10 day(s) MAE                      : 0.65201994
10 day(s) RMSE                     : 2.86880321
10 day(s) R2                       : -0.02614891
10 day(s) Pearson r                : 0.08861676
10 day(s) QLIKE                    : 0.90433764
20 day(s) MAE                      : 0.66397599
20 day(s) RMSE                     : 2.89140316
20 day(s) R2                       : -0.04225660
20 day(s) Pearson r                : 0.05447117
20 day(s) QLIKE                    : 0.92956264
full horizon MAE                   : 0.66397599
full horizon RMSE                  : 2.89140316
full horizon R2                    : -0.04225660
full horizon Pearson r             : 0.05447117
full horizon QLIKE                 : 0.92956264

--- Task 2 ---
1 day(s) MAE                       : 0.31459947
1 day(s) RMSE                      : 0.70555513
1 day(s) R2                        : -0.24805836
1 day(s) Pearson r                 : 0.03161115
1 day(s) QLIKE                     : 21.17692774
3 day(s) MAE                       : 111.47469893
3 day(s) RMSE                      : 4763.35197436
3 day(s) R2                        : -56620395.92976415
3 day(s) Pearson r                 : 0.00936466
3 day(s) QLIKE                     : 21.21323920
5 day(s) MAE                       : 67.03270204
5 day(s) RMSE                      : 3689.67673836
5 day(s) R2                        : -33939227.14359390
5 day(s) Pearson r                 : 0.00725068
5 day(s) QLIKE                     : 21.20319951
10 day(s) MAE                      : 33.67479180
10 day(s) RMSE                     : 2608.99549019
10 day(s) R2                       : -16968746.91207429
10 day(s) Pearson r                : 0.00512689
10 day(s) QLIKE                    : 20.49381617
20 day(s) MAE                      : 16.99865950
20 day(s) RMSE                     : 1844.83847213
20 day(s) R2                       : -8442555.01501749
20 day(s) Pearson r                : 0.00357271
20 day(s) QLIKE                    : 20.82399532
full horizon MAE                   : 16.99865950
full horizon RMSE                  : 1844.83847213
full horizon R2                    : -8442555.01501749
full horizon Pearson r             : 0.00357271
full horizon QLIKE                 : 20.82399532

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/Saved_objects/final_results_saved_object/SP500/ITransformer_H20.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0510465, max=22.705
In [153]:
# Final evaluation grid: every asset / model / horizon combination used in the
# closing comparison. NOTE: `final_models` is not consumed in this cell — it is
# presumably read by later summary cells; kept for that reason.
final_tickers = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
final_models = ["Simple_MLP", "Simple_KAN", "Simple_LSTM", "ITransformer"]
final_horizons = [1, 5, 10, 20]

# Rolling-window GARCH(1,1) baseline with Student-t innovations, evaluated on
# the same anchors / 80-20 split as the ML models so metrics are comparable.
# metric_horizons' trailing -1 requests the "full horizon" aggregate row.
garch_run_kwargs = dict(
    structured_data_dict=structured_data_dict_10,
    tickers=final_tickers,
    horizons=final_horizons,
    results_dict=final_results_store,
    anchor_step=59,           # last step of the 60-step input window
    anchor_feature_idx=1,     # assumes feature 1 is the volatility target — TODO confirm
    y_feature_idx=1,
    split_ratio=0.8,
    roll_window=500,
    mean_mode="Zero",
    candidates=[("GARCH", 1, 1, "t")],
    sim_paths=2000,
    metric_horizons=(1, 5, 10, 20, -1),
    verbose=True,
    save_dir=None,            # None -> results kept in memory only, nothing written to disk
)
all_results = run_garch_over_dict(**garch_run_kwargs)
[GARCH] AAPL — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=1, roll_window=500
  anchor 1613/2017: sd=1.87362, h1_pred=2.38816
  anchor 1694/2017: sd=1.83036, h1_pred=0.971728
  anchor 1775/2017: sd=1.75437, h1_pred=1.87798
  anchor 1856/2017: sd=1.63043, h1_pred=2.47048
  anchor 1937/2017: sd=1.36801, h1_pred=1.3947

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.244934
RMSE      : 3.204341
R2        : -2.334505
Pearson r : 0.037045
QLIKE     : 7.146105

full horizon
MAE       : 2.244934
RMSE      : 3.204341
R2        : -2.334505
Pearson r : 0.037045
QLIKE     : 7.146105

[GARCH] AAPL — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=5, roll_window=500
  anchor 1613/2017: sd=1.87362, h1_pred=2.38816
  anchor 1694/2017: sd=1.83036, h1_pred=0.971728
  anchor 1775/2017: sd=1.75437, h1_pred=1.87798
  anchor 1856/2017: sd=1.63043, h1_pred=2.47048
  anchor 1937/2017: sd=1.36801, h1_pred=1.3947

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.244934
RMSE      : 3.204341
R2        : -2.334505
Pearson r : 0.037045
QLIKE     : 7.146105

5 day(s)
MAE       : 2.255908
RMSE      : 3.162423
R2        : -2.228654
Pearson r : 0.050060
QLIKE     : 7.189652

full horizon
MAE       : 2.255908
RMSE      : 3.162423
R2        : -2.228654
Pearson r : 0.050060
QLIKE     : 7.189652

[GARCH] AAPL — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=10, roll_window=500
  anchor 1613/2017: sd=1.87362, h1_pred=2.38816
  anchor 1694/2017: sd=1.83036, h1_pred=0.971728
  anchor 1775/2017: sd=1.75437, h1_pred=1.87798
  anchor 1856/2017: sd=1.63043, h1_pred=2.47048
  anchor 1937/2017: sd=1.36801, h1_pred=1.3947

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.244934
RMSE      : 3.204341
R2        : -2.334505
Pearson r : 0.037045
QLIKE     : 7.146105

5 day(s)
MAE       : 2.255908
RMSE      : 3.162423
R2        : -2.228654
Pearson r : 0.050060
QLIKE     : 7.189652

10 day(s)
MAE       : 2.259492
RMSE      : 3.123894
R2        : -2.140761
Pearson r : 0.053776
QLIKE     : 7.200451

full horizon
MAE       : 2.259492
RMSE      : 3.123894
R2        : -2.140761
Pearson r : 0.053776
QLIKE     : 7.200451

[GARCH] AAPL — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=20, roll_window=500
  anchor 1613/2017: sd=1.87362, h1_pred=2.38816
  anchor 1694/2017: sd=1.83036, h1_pred=0.971728
  anchor 1775/2017: sd=1.75437, h1_pred=1.87798
  anchor 1856/2017: sd=1.63043, h1_pred=2.47048
  anchor 1937/2017: sd=1.36801, h1_pred=1.3947

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.244934
RMSE      : 3.204341
R2        : -2.334505
Pearson r : 0.037045
QLIKE     : 7.146105

5 day(s)
MAE       : 2.255908
RMSE      : 3.162423
R2        : -2.228654
Pearson r : 0.050060
QLIKE     : 7.189652

10 day(s)
MAE       : 2.259492
RMSE      : 3.123894
R2        : -2.140761
Pearson r : 0.053776
QLIKE     : 7.200451

20 day(s)
MAE       : 2.264942
RMSE      : 3.099011
R2        : -2.072988
Pearson r : 0.022402
QLIKE     : 7.186950

full horizon
MAE       : 2.264942
RMSE      : 3.099011
R2        : -2.072988
Pearson r : 0.022402
QLIKE     : 7.186950

[GARCH] MSFT — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=1, roll_window=500
  anchor 1613/2017: sd=1.94741, h1_pred=2.76349
  anchor 1694/2017: sd=1.9336, h1_pred=1.52029
  anchor 1775/2017: sd=1.80593, h1_pred=1.26253
  anchor 1856/2017: sd=1.65404, h1_pred=1.57182
  anchor 1937/2017: sd=1.42743, h1_pred=1.33005

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 1.786189
RMSE      : 2.192145
R2        : -1.374047
Pearson r : 0.062285
QLIKE     : 7.146787

full horizon
MAE       : 1.786189
RMSE      : 2.192145
R2        : -1.374047
Pearson r : 0.062285
QLIKE     : 7.146787

[GARCH] MSFT — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=5, roll_window=500
  anchor 1613/2017: sd=1.94741, h1_pred=2.76349
  anchor 1694/2017: sd=1.9336, h1_pred=1.52029
  anchor 1775/2017: sd=1.80593, h1_pred=1.26253
  anchor 1856/2017: sd=1.65404, h1_pred=1.57182
  anchor 1937/2017: sd=1.42743, h1_pred=1.33005

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 1.786189
RMSE      : 2.192145
R2        : -1.374047
Pearson r : 0.062285
QLIKE     : 7.146787

5 day(s)
MAE       : 1.798916
RMSE      : 2.209159
R2        : -1.395942
Pearson r : 0.050124
QLIKE     : 7.217582

full horizon
MAE       : 1.798916
RMSE      : 2.209159
R2        : -1.395942
Pearson r : 0.050124
QLIKE     : 7.217582

[GARCH] MSFT — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=10, roll_window=500
  anchor 1613/2017: sd=1.94741, h1_pred=2.76349
  anchor 1694/2017: sd=1.9336, h1_pred=1.52029
  anchor 1775/2017: sd=1.80593, h1_pred=1.26253
  anchor 1856/2017: sd=1.65404, h1_pred=1.57182
  anchor 1937/2017: sd=1.42743, h1_pred=1.33005

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 1.786189
RMSE      : 2.192145
R2        : -1.374047
Pearson r : 0.062285
QLIKE     : 7.146787

5 day(s)
MAE       : 1.798916
RMSE      : 2.209159
R2        : -1.395942
Pearson r : 0.050124
QLIKE     : 7.217582

10 day(s)
MAE       : 1.807977
RMSE      : 2.225826
R2        : -1.402882
Pearson r : 0.041701
QLIKE     : 7.243873

full horizon
MAE       : 1.807977
RMSE      : 2.225826
R2        : -1.402882
Pearson r : 0.041701
QLIKE     : 7.243873

[GARCH] MSFT — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=20, roll_window=500
  anchor 1613/2017: sd=1.94741, h1_pred=2.76349
  anchor 1694/2017: sd=1.9336, h1_pred=1.52029
  anchor 1775/2017: sd=1.80593, h1_pred=1.26253
  anchor 1856/2017: sd=1.65404, h1_pred=1.57182
  anchor 1937/2017: sd=1.42743, h1_pred=1.33005

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 1.786189
RMSE      : 2.192145
R2        : -1.374047
Pearson r : 0.062285
QLIKE     : 7.146787

5 day(s)
MAE       : 1.798916
RMSE      : 2.209159
R2        : -1.395942
Pearson r : 0.050124
QLIKE     : 7.217582

10 day(s)
MAE       : 1.807977
RMSE      : 2.225826
R2        : -1.402882
Pearson r : 0.041701
QLIKE     : 7.243873

20 day(s)
MAE       : 1.810601
RMSE      : 2.235479
R2        : -1.358412
Pearson r : 0.048946
QLIKE     : 7.203543

full horizon
MAE       : 1.810601
RMSE      : 2.235479
R2        : -1.358412
Pearson r : 0.048946
QLIKE     : 7.203543

[GARCH] GE — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=1, roll_window=500
  anchor 1613/2017: sd=2.11616, h1_pred=3.57706
  anchor 1694/2017: sd=2.07402, h1_pred=3.26143
  anchor 1775/2017: sd=2.28575, h1_pred=6.23968
  anchor 1856/2017: sd=2.22776, h1_pred=3.23082
  anchor 1937/2017: sd=2.22359, h1_pred=3.6585

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 3.633551
RMSE      : 4.848706
R2        : -3.263633
Pearson r : 0.004758
QLIKE     : 7.128652

full horizon
MAE       : 3.633551
RMSE      : 4.848706
R2        : -3.263633
Pearson r : 0.004758
QLIKE     : 7.128652

[GARCH] GE — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=5, roll_window=500
  anchor 1613/2017: sd=2.11616, h1_pred=3.57706
  anchor 1694/2017: sd=2.07402, h1_pred=3.26143
  anchor 1775/2017: sd=2.28575, h1_pred=6.23968
  anchor 1856/2017: sd=2.22776, h1_pred=3.23082
  anchor 1937/2017: sd=2.22359, h1_pred=3.6585

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 3.633551
RMSE      : 4.848706
R2        : -3.263633
Pearson r : 0.004758
QLIKE     : 7.128652

5 day(s)
MAE       : 3.566507
RMSE      : 4.593790
R2        : -2.819007
Pearson r : 0.031238
QLIKE     : 7.134824

full horizon
MAE       : 3.566507
RMSE      : 4.593790
R2        : -2.819007
Pearson r : 0.031238
QLIKE     : 7.134824

[GARCH] GE — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=10, roll_window=500
  anchor 1613/2017: sd=2.11616, h1_pred=3.57706
  anchor 1694/2017: sd=2.07402, h1_pred=3.26143
  anchor 1775/2017: sd=2.28575, h1_pred=6.23968
  anchor 1856/2017: sd=2.22776, h1_pred=3.23082
  anchor 1937/2017: sd=2.22359, h1_pred=3.6585

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 3.633551
RMSE      : 4.848706
R2        : -3.263633
Pearson r : 0.004758
QLIKE     : 7.128652

5 day(s)
MAE       : 3.566507
RMSE      : 4.593790
R2        : -2.819007
Pearson r : 0.031238
QLIKE     : 7.134824

10 day(s)
MAE       : 3.535873
RMSE      : 4.478415
R2        : -2.602853
Pearson r : 0.014892
QLIKE     : 7.128936

full horizon
MAE       : 3.535873
RMSE      : 4.478415
R2        : -2.602853
Pearson r : 0.014892
QLIKE     : 7.128936

[GARCH] GE — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=20, roll_window=500
  anchor 1613/2017: sd=2.11616, h1_pred=3.57706
  anchor 1694/2017: sd=2.07402, h1_pred=3.26143
  anchor 1775/2017: sd=2.28575, h1_pred=6.23968
  anchor 1856/2017: sd=2.22776, h1_pred=3.23082
  anchor 1937/2017: sd=2.22359, h1_pred=3.6585

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 3.633551
RMSE      : 4.848706
R2        : -3.263633
Pearson r : 0.004758
QLIKE     : 7.128652

5 day(s)
MAE       : 3.566507
RMSE      : 4.593790
R2        : -2.819007
Pearson r : 0.031238
QLIKE     : 7.134824

10 day(s)
MAE       : 3.535873
RMSE      : 4.478415
R2        : -2.602853
Pearson r : 0.014892
QLIKE     : 7.128936

20 day(s)
MAE       : 3.492241
RMSE      : 4.346271
R2        : -2.377670
Pearson r : 0.002262
QLIKE     : 7.074895

full horizon
MAE       : 3.492241
RMSE      : 4.346271
R2        : -2.377670
Pearson r : 0.002262
QLIKE     : 7.074895

[GARCH] BAC — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=1, roll_window=500
  anchor 1613/2017: sd=1.85719, h1_pred=2.64216
  anchor 1694/2017: sd=1.87482, h1_pred=2.98279
  anchor 1775/2017: sd=1.7586, h1_pred=3.65846
  anchor 1856/2017: sd=1.65528, h1_pred=2.10081
  anchor 1937/2017: sd=1.55539, h1_pred=2.13467

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.870229
RMSE      : 3.402386
R2        : -3.005962
Pearson r : -0.032652
QLIKE     : 7.860950

full horizon
MAE       : 2.870229
RMSE      : 3.402386
R2        : -3.005962
Pearson r : -0.032652
QLIKE     : 7.860950

[GARCH] BAC — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=5, roll_window=500
  anchor 1613/2017: sd=1.85719, h1_pred=2.64216
  anchor 1694/2017: sd=1.87482, h1_pred=2.98279
  anchor 1775/2017: sd=1.7586, h1_pred=3.65846
  anchor 1856/2017: sd=1.65528, h1_pred=2.10081
  anchor 1937/2017: sd=1.55539, h1_pred=2.13467

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.870229
RMSE      : 3.402386
R2        : -3.005962
Pearson r : -0.032652
QLIKE     : 7.860950

5 day(s)
MAE       : 2.931345
RMSE      : 3.338518
R2        : -2.828464
Pearson r : 0.046543
QLIKE     : 7.818094

full horizon
MAE       : 2.931345
RMSE      : 3.338518
R2        : -2.828464
Pearson r : 0.046543
QLIKE     : 7.818094

[GARCH] BAC — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=10, roll_window=500
  anchor 1613/2017: sd=1.85719, h1_pred=2.64216
  anchor 1694/2017: sd=1.87482, h1_pred=2.98279
  anchor 1775/2017: sd=1.7586, h1_pred=3.65846
  anchor 1856/2017: sd=1.65528, h1_pred=2.10081
  anchor 1937/2017: sd=1.55539, h1_pred=2.13467

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.870229
RMSE      : 3.402386
R2        : -3.005962
Pearson r : -0.032652
QLIKE     : 7.860950

5 day(s)
MAE       : 2.931345
RMSE      : 3.338518
R2        : -2.828464
Pearson r : 0.046543
QLIKE     : 7.818094

10 day(s)
MAE       : 2.978968
RMSE      : 3.359030
R2        : -2.847093
Pearson r : 0.063185
QLIKE     : 7.802285

full horizon
MAE       : 2.978968
RMSE      : 3.359030
R2        : -2.847093
Pearson r : 0.063185
QLIKE     : 7.802285

[GARCH] BAC — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=20, roll_window=500
  anchor 1613/2017: sd=1.85719, h1_pred=2.64216
  anchor 1694/2017: sd=1.87482, h1_pred=2.98279
  anchor 1775/2017: sd=1.7586, h1_pred=3.65846
  anchor 1856/2017: sd=1.65528, h1_pred=2.10081
  anchor 1937/2017: sd=1.55539, h1_pred=2.13467

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.870229
RMSE      : 3.402386
R2        : -3.005962
Pearson r : -0.032652
QLIKE     : 7.860950

5 day(s)
MAE       : 2.931345
RMSE      : 3.338518
R2        : -2.828464
Pearson r : 0.046543
QLIKE     : 7.818094

10 day(s)
MAE       : 2.978968
RMSE      : 3.359030
R2        : -2.847093
Pearson r : 0.063185
QLIKE     : 7.802285

20 day(s)
MAE       : 3.009191
RMSE      : 3.376419
R2        : -2.870120
Pearson r : 0.066700
QLIKE     : 7.729510

full horizon
MAE       : 3.009191
RMSE      : 3.376419
R2        : -2.870120
Pearson r : 0.066700
QLIKE     : 7.729510

[GARCH] C — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=1, roll_window=500
  anchor 1613/2017: sd=1.85564, h1_pred=2.64944
  anchor 1694/2017: sd=1.85929, h1_pred=2.85303
  anchor 1775/2017: sd=1.81797, h1_pred=2.15036
  anchor 1856/2017: sd=1.65736, h1_pred=2.84016
  anchor 1937/2017: sd=1.62662, h1_pred=2.14779

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.924805
RMSE      : 3.507115
R2        : -2.456264
Pearson r : -0.071509
QLIKE     : 7.395842

full horizon
MAE       : 2.924805
RMSE      : 3.507115
R2        : -2.456264
Pearson r : -0.071509
QLIKE     : 7.395842

[GARCH] C — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=5, roll_window=500
  anchor 1613/2017: sd=1.85564, h1_pred=2.64944
  anchor 1694/2017: sd=1.85929, h1_pred=2.85303
  anchor 1775/2017: sd=1.81797, h1_pred=2.15036
  anchor 1856/2017: sd=1.65736, h1_pred=2.84016
  anchor 1937/2017: sd=1.62662, h1_pred=2.14779

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.924805
RMSE      : 3.507115
R2        : -2.456264
Pearson r : -0.071509
QLIKE     : 7.395842

5 day(s)
MAE       : 2.915682
RMSE      : 3.423420
R2        : -2.266680
Pearson r : 0.006983
QLIKE     : 7.370242

full horizon
MAE       : 2.915682
RMSE      : 3.423420
R2        : -2.266680
Pearson r : 0.006983
QLIKE     : 7.370242

[GARCH] C — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=10, roll_window=500
  anchor 1613/2017: sd=1.85564, h1_pred=2.64944
  anchor 1694/2017: sd=1.85929, h1_pred=2.85303
  anchor 1775/2017: sd=1.81797, h1_pred=2.15036
  anchor 1856/2017: sd=1.65736, h1_pred=2.84016
  anchor 1937/2017: sd=1.62662, h1_pred=2.14779

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.924805
RMSE      : 3.507115
R2        : -2.456264
Pearson r : -0.071509
QLIKE     : 7.395842

5 day(s)
MAE       : 2.915682
RMSE      : 3.423420
R2        : -2.266680
Pearson r : 0.006983
QLIKE     : 7.370242

10 day(s)
MAE       : 2.912444
RMSE      : 3.399253
R2        : -2.198418
Pearson r : 0.024564
QLIKE     : 7.305151

full horizon
MAE       : 2.912444
RMSE      : 3.399253
R2        : -2.198418
Pearson r : 0.024564
QLIKE     : 7.305151

[GARCH] C — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2018, split=0.8 -> n_train=1614, t0=1613, anchors=405, H=20, roll_window=500
  anchor 1613/2017: sd=1.85564, h1_pred=2.64944
  anchor 1694/2017: sd=1.85929, h1_pred=2.85303
  anchor 1775/2017: sd=1.81797, h1_pred=2.15036
  anchor 1856/2017: sd=1.65736, h1_pred=2.84016
  anchor 1937/2017: sd=1.62662, h1_pred=2.14779

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 2.924805
RMSE      : 3.507115
R2        : -2.456264
Pearson r : -0.071509
QLIKE     : 7.395842

5 day(s)
MAE       : 2.915682
RMSE      : 3.423420
R2        : -2.266680
Pearson r : 0.006983
QLIKE     : 7.370242

10 day(s)
MAE       : 2.912444
RMSE      : 3.399253
R2        : -2.198418
Pearson r : 0.024564
QLIKE     : 7.305151

20 day(s)
MAE       : 2.899955
RMSE      : 3.379488
R2        : -2.145620
Pearson r : 0.034844
QLIKE     : 7.280166

full horizon
MAE       : 2.899955
RMSE      : 3.379488
R2        : -2.145620
Pearson r : 0.034844
QLIKE     : 7.280166

[GARCH] BTCUSDT — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2399, split=0.8 -> n_train=1919, t0=1918, anchors=481, H=1, roll_window=500
  anchor 1918/2398: sd=2.49753, h1_pred=7.90099
  anchor 2014/2398: sd=2.46728, h1_pred=11.4554
  anchor 2110/2398: sd=2.42276, h1_pred=6.17304
  anchor 2206/2398: sd=2.49018, h1_pred=6.24523
  anchor 2302/2398: sd=2.62524, h1_pred=7.74882
  anchor 2398/2398: sd=2.72702, h1_pred=7.38778

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 10.581360
RMSE      : 13.109958
R2        : -22.367818
Pearson r : 0.028521
QLIKE     : 7.610465

full horizon
MAE       : 10.581360
RMSE      : 13.109958
R2        : -22.367818
Pearson r : 0.028521
QLIKE     : 7.610465

[GARCH] BTCUSDT — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2399, split=0.8 -> n_train=1919, t0=1918, anchors=481, H=5, roll_window=500
  anchor 1918/2398: sd=2.49753, h1_pred=7.90099
  anchor 2014/2398: sd=2.46728, h1_pred=11.4554
  anchor 2110/2398: sd=2.42276, h1_pred=6.17304
  anchor 2206/2398: sd=2.49018, h1_pred=6.24523
  anchor 2302/2398: sd=2.62524, h1_pred=7.74882
  anchor 2398/2398: sd=2.72702, h1_pred=7.38778

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 10.581360
RMSE      : 13.109958
R2        : -22.367818
Pearson r : 0.028521
QLIKE     : 7.610465

5 day(s)
MAE       : 11.462170
RMSE      : 14.056496
R2        : -25.936163
Pearson r : 0.049892
QLIKE     : 7.593759

full horizon
MAE       : 11.462170
RMSE      : 14.056496
R2        : -25.936163
Pearson r : 0.049892
QLIKE     : 7.593759

[GARCH] BTCUSDT — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2399, split=0.8 -> n_train=1919, t0=1918, anchors=481, H=10, roll_window=500
  anchor 1918/2398: sd=2.49753, h1_pred=7.90099
  anchor 2014/2398: sd=2.46728, h1_pred=11.4554
  anchor 2110/2398: sd=2.42276, h1_pred=6.17304
  anchor 2206/2398: sd=2.49018, h1_pred=6.24523
  anchor 2302/2398: sd=2.62524, h1_pred=7.74882
  anchor 2398/2398: sd=2.72702, h1_pred=7.38778

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 10.581360
RMSE      : 13.109958
R2        : -22.367818
Pearson r : 0.028521
QLIKE     : 7.610465

5 day(s)
MAE       : 11.462170
RMSE      : 14.056496
R2        : -25.936163
Pearson r : 0.049892
QLIKE     : 7.593759

10 day(s)
MAE       : 12.160214
RMSE      : 15.322183
R2        : -31.112685
Pearson r : 0.043659
QLIKE     : 7.602872

full horizon
MAE       : 12.160214
RMSE      : 15.322183
R2        : -31.112685
Pearson r : 0.043659
QLIKE     : 7.602872

[GARCH] BTCUSDT — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=2399, split=0.8 -> n_train=1919, t0=1918, anchors=481, H=20, roll_window=500
  anchor 1918/2398: sd=2.49753, h1_pred=7.90099
  anchor 2014/2398: sd=2.46728, h1_pred=11.4554
  anchor 2110/2398: sd=2.42276, h1_pred=6.17304
  anchor 2206/2398: sd=2.49018, h1_pred=6.24523
  anchor 2302/2398: sd=2.62524, h1_pred=7.74882
  anchor 2398/2398: sd=2.72702, h1_pred=7.38778

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 10.581360
RMSE      : 13.109958
R2        : -22.367818
Pearson r : 0.028521
QLIKE     : 7.610465

5 day(s)
MAE       : 11.462170
RMSE      : 14.056496
R2        : -25.936163
Pearson r : 0.049892
QLIKE     : 7.593759

10 day(s)
MAE       : 12.160214
RMSE      : 15.322183
R2        : -31.112685
Pearson r : 0.043659
QLIKE     : 7.602872

20 day(s)
MAE       : 13.009847
RMSE      : 17.526014
R2        : -41.054064
Pearson r : 0.026922
QLIKE     : 7.624235

full horizon
MAE       : 13.009847
RMSE      : 17.526014
R2        : -41.054064
Pearson r : 0.026922
QLIKE     : 7.624235

[GARCH] EURUSD — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=1, roll_window=500
  anchor 3024/3781: sd=0.482831, h1_pred=0.544266
  anchor 3175/3781: sd=0.523006, h1_pred=0.210495
  anchor 3326/3781: sd=0.514012, h1_pred=0.236107
  anchor 3477/3781: sd=0.442679, h1_pred=0.0999663
  anchor 3628/3781: sd=0.351926, h1_pred=0.0976806
  anchor 3779/3781: sd=0.415898, h1_pred=0.424184

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.349073
RMSE      : 0.465803
R2        : -0.233381
Pearson r : 0.053204
QLIKE     : 7.951148

full horizon
MAE       : 0.349073
RMSE      : 0.465803
R2        : -0.233381
Pearson r : 0.053204
QLIKE     : 7.951148

[GARCH] EURUSD — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=5, roll_window=500
  anchor 3024/3781: sd=0.482831, h1_pred=0.544266
  anchor 3175/3781: sd=0.523006, h1_pred=0.210495
  anchor 3326/3781: sd=0.514012, h1_pred=0.236107
  anchor 3477/3781: sd=0.442679, h1_pred=0.0999663
  anchor 3628/3781: sd=0.351926, h1_pred=0.0976806
  anchor 3779/3781: sd=0.415898, h1_pred=0.424184

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.349073
RMSE      : 0.465803
R2        : -0.233381
Pearson r : 0.053204
QLIKE     : 7.951148

5 day(s)
MAE       : 0.349318
RMSE      : 0.466015
R2        : -0.243071
Pearson r : 0.040456
QLIKE     : 7.965275

full horizon
MAE       : 0.349318
RMSE      : 0.466015
R2        : -0.243071
Pearson r : 0.040456
QLIKE     : 7.965275

[GARCH] EURUSD — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=10, roll_window=500
  anchor 3024/3781: sd=0.482831, h1_pred=0.544266
  anchor 3175/3781: sd=0.523006, h1_pred=0.210495
  anchor 3326/3781: sd=0.514012, h1_pred=0.236107
  anchor 3477/3781: sd=0.442679, h1_pred=0.0999663
  anchor 3628/3781: sd=0.351926, h1_pred=0.0976806
  anchor 3779/3781: sd=0.415898, h1_pred=0.424184

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.349073
RMSE      : 0.465803
R2        : -0.233381
Pearson r : 0.053204
QLIKE     : 7.951148

5 day(s)
MAE       : 0.349318
RMSE      : 0.466015
R2        : -0.243071
Pearson r : 0.040456
QLIKE     : 7.965275

10 day(s)
MAE       : 0.351075
RMSE      : 0.468390
R2        : -0.257742
Pearson r : 0.022274
QLIKE     : 7.985322

full horizon
MAE       : 0.351075
RMSE      : 0.468390
R2        : -0.257742
Pearson r : 0.022274
QLIKE     : 7.985322

[GARCH] EURUSD — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3782, split=0.8 -> n_train=3025, t0=3024, anchors=758, H=20, roll_window=500
  anchor 3024/3781: sd=0.482831, h1_pred=0.544266
  anchor 3175/3781: sd=0.523006, h1_pred=0.210495
  anchor 3326/3781: sd=0.514012, h1_pred=0.236107
  anchor 3477/3781: sd=0.442679, h1_pred=0.0999663
  anchor 3628/3781: sd=0.351926, h1_pred=0.0976806
  anchor 3779/3781: sd=0.415898, h1_pred=0.424184

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.349073
RMSE      : 0.465803
R2        : -0.233381
Pearson r : 0.053204
QLIKE     : 7.951148

5 day(s)
MAE       : 0.349318
RMSE      : 0.466015
R2        : -0.243071
Pearson r : 0.040456
QLIKE     : 7.965275

10 day(s)
MAE       : 0.351075
RMSE      : 0.468390
R2        : -0.257742
Pearson r : 0.022274
QLIKE     : 7.985322

20 day(s)
MAE       : 0.354225
RMSE      : 0.471626
R2        : -0.270650
Pearson r : 0.010490
QLIKE     : 7.998339

full horizon
MAE       : 0.354225
RMSE      : 0.471626
R2        : -0.270650
Pearson r : 0.010490
QLIKE     : 7.998339

[GARCH] GOLD — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=1, roll_window=500
  anchor 4426/5533: sd=1.04494, h1_pred=0.70255
  anchor 4647/5533: sd=0.795476, h1_pred=0.745113
  anchor 4868/5533: sd=0.809417, h1_pred=0.85374
  anchor 5089/5533: sd=0.788481, h1_pred=0.492214
  anchor 5310/5533: sd=0.810009, h1_pred=1.0791
  anchor 5531/5533: sd=0.906483, h1_pred=1.49614

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.952836
RMSE      : 1.178645
R2        : -0.956726
Pearson r : -0.016528
QLIKE     : 7.512093

full horizon
MAE       : 0.952836
RMSE      : 1.178645
R2        : -0.956726
Pearson r : -0.016528
QLIKE     : 7.512093

[GARCH] GOLD — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=5, roll_window=500
  anchor 4426/5533: sd=1.04494, h1_pred=0.70255
  anchor 4647/5533: sd=0.795476, h1_pred=0.745113
  anchor 4868/5533: sd=0.809417, h1_pred=0.85374
  anchor 5089/5533: sd=0.788481, h1_pred=0.492214
  anchor 5310/5533: sd=0.810009, h1_pred=1.0791
  anchor 5531/5533: sd=0.906483, h1_pred=1.49614

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.952836
RMSE      : 1.178645
R2        : -0.956726
Pearson r : -0.016528
QLIKE     : 7.512093

5 day(s)
MAE       : 0.955287
RMSE      : 1.179401
R2        : -0.951522
Pearson r : -0.003565
QLIKE     : 7.490822

full horizon
MAE       : 0.955287
RMSE      : 1.179401
R2        : -0.951522
Pearson r : -0.003565
QLIKE     : 7.490822

[GARCH] GOLD — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=10, roll_window=500
  anchor 4426/5533: sd=1.04494, h1_pred=0.70255
  anchor 4647/5533: sd=0.795476, h1_pred=0.745113
  anchor 4868/5533: sd=0.809417, h1_pred=0.85374
  anchor 5089/5533: sd=0.788481, h1_pred=0.492214
  anchor 5310/5533: sd=0.810009, h1_pred=1.0791
  anchor 5531/5533: sd=0.906483, h1_pred=1.49614

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.952836
RMSE      : 1.178645
R2        : -0.956726
Pearson r : -0.016528
QLIKE     : 7.512093

5 day(s)
MAE       : 0.955287
RMSE      : 1.179401
R2        : -0.951522
Pearson r : -0.003565
QLIKE     : 7.490822

10 day(s)
MAE       : 0.961594
RMSE      : 1.183407
R2        : -0.955371
Pearson r : 0.005137
QLIKE     : 7.497311

full horizon
MAE       : 0.961594
RMSE      : 1.183407
R2        : -0.955371
Pearson r : 0.005137
QLIKE     : 7.497311

[GARCH] GOLD — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=5534, split=0.8 -> n_train=4427, t0=4426, anchors=1108, H=20, roll_window=500
  anchor 4426/5533: sd=1.04494, h1_pred=0.70255
  anchor 4647/5533: sd=0.795476, h1_pred=0.745113
  anchor 4868/5533: sd=0.809417, h1_pred=0.85374
  anchor 5089/5533: sd=0.788481, h1_pred=0.492214
  anchor 5310/5533: sd=0.810009, h1_pred=1.0791
  anchor 5531/5533: sd=0.906483, h1_pred=1.49614

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.952836
RMSE      : 1.178645
R2        : -0.956726
Pearson r : -0.016528
QLIKE     : 7.512093

5 day(s)
MAE       : 0.955287
RMSE      : 1.179401
R2        : -0.951522
Pearson r : -0.003565
QLIKE     : 7.490822

10 day(s)
MAE       : 0.961594
RMSE      : 1.183407
R2        : -0.955371
Pearson r : 0.005137
QLIKE     : 7.497311

20 day(s)
MAE       : 0.973243
RMSE      : 1.194606
R2        : -0.975036
Pearson r : 0.002903
QLIKE     : 7.532503

full horizon
MAE       : 0.973243
RMSE      : 1.194606
R2        : -0.975036
Pearson r : 0.002903
QLIKE     : 7.532503

[GARCH] SP500 — H=1 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3466, split=0.8 -> n_train=2772, t0=2771, anchors=695, H=1, roll_window=500
  anchor 2771/3465: sd=1.15234, h1_pred=0.784653
  anchor 2910/3465: sd=1.16472, h1_pred=0.36848
  anchor 3049/3465: sd=1.02343, h1_pred=0.403425
  anchor 3188/3465: sd=0.790157, h1_pred=0.431721
  anchor 3327/3465: sd=0.681993, h1_pred=0.501682

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.964632
RMSE      : 2.217733
R2        : -4.540694
Pearson r : 0.010440
QLIKE     : 7.309367

full horizon
MAE       : 0.964632
RMSE      : 2.217733
R2        : -4.540694
Pearson r : 0.010440
QLIKE     : 7.309367

[GARCH] SP500 — H=5 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3466, split=0.8 -> n_train=2772, t0=2771, anchors=695, H=5, roll_window=500
  anchor 2771/3465: sd=1.15234, h1_pred=0.784653
  anchor 2910/3465: sd=1.16472, h1_pred=0.36848
  anchor 3049/3465: sd=1.02343, h1_pred=0.403425
  anchor 3188/3465: sd=0.790157, h1_pred=0.431721
  anchor 3327/3465: sd=0.681993, h1_pred=0.501682

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.964632
RMSE      : 2.217733
R2        : -4.540694
Pearson r : 0.010440
QLIKE     : 7.309367

5 day(s)
MAE       : 0.963199
RMSE      : 2.112908
R2        : -4.020409
Pearson r : 0.025739
QLIKE     : 7.327925

full horizon
MAE       : 0.963199
RMSE      : 2.112908
R2        : -4.020409
Pearson r : 0.025739
QLIKE     : 7.327925

[GARCH] SP500 — H=10 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3466, split=0.8 -> n_train=2772, t0=2771, anchors=695, H=10, roll_window=500
  anchor 2771/3465: sd=1.15234, h1_pred=0.784653
  anchor 2910/3465: sd=1.16472, h1_pred=0.36848
  anchor 3049/3465: sd=1.02343, h1_pred=0.403425
  anchor 3188/3465: sd=0.790157, h1_pred=0.431721
  anchor 3327/3465: sd=0.681993, h1_pred=0.501682

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.964632
RMSE      : 2.217733
R2        : -4.540694
Pearson r : 0.010440
QLIKE     : 7.309367

5 day(s)
MAE       : 0.963199
RMSE      : 2.112908
R2        : -4.020409
Pearson r : 0.025739
QLIKE     : 7.327925

10 day(s)
MAE       : 0.958098
RMSE      : 2.002501
R2        : -3.512686
Pearson r : 0.038548
QLIKE     : 7.347556

full horizon
MAE       : 0.958098
RMSE      : 2.002501
R2        : -3.512686
Pearson r : 0.038548
QLIKE     : 7.347556

[GARCH] SP500 — H=20 (step=59, x_feat=1, y_feat=1)
[vol rolling] B=3466, split=0.8 -> n_train=2772, t0=2771, anchors=695, H=20, roll_window=500
  anchor 2771/3465: sd=1.15234, h1_pred=0.784653
  anchor 2910/3465: sd=1.16472, h1_pred=0.36848
  anchor 3049/3465: sd=1.02343, h1_pred=0.403425
  anchor 3188/3465: sd=0.790157, h1_pred=0.431721
  anchor 3327/3465: sd=0.681993, h1_pred=0.501682

[metrics — vol rolling (ML-aligned split)]

1 day(s)
MAE       : 0.964632
RMSE      : 2.217733
R2        : -4.540694
Pearson r : 0.010440
QLIKE     : 7.309367

5 day(s)
MAE       : 0.963199
RMSE      : 2.112908
R2        : -4.020409
Pearson r : 0.025739
QLIKE     : 7.327925

10 day(s)
MAE       : 0.958098
RMSE      : 2.002501
R2        : -3.512686
Pearson r : 0.038548
QLIKE     : 7.347556

20 day(s)
MAE       : 0.937416
RMSE      : 1.837063
R2        : -2.796084
Pearson r : 0.045488
QLIKE     : 7.333914

full horizon
MAE       : 0.937416
RMSE      : 1.837063
R2        : -2.796084
Pearson r : 0.045488
QLIKE     : 7.333914
In [160]:
# Pivot final_results_store into per-(ticker, model) metric-by-horizon tables,
# keeping only the "full"-horizon slice of each outer run; pretty_print echoes
# each table as it is built (the printed tables appear in the output below).
# NOTE(review): build_metric_frames is defined earlier in this notebook.
final_frames = build_metric_frames(final_results_store, outer_horizon="full", pretty_print=True)
=== EURUSD | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.349073  0.349318  0.351075  0.354225
Pearson r  0.053204  0.040456  0.022274  0.010490
QLIKE      7.951148  7.965275  7.985322  7.998339
R2        -0.233381 -0.243071 -0.257742 -0.270650
RMSE       0.465803  0.466015  0.468390  0.471626

=== EURUSD | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.107403  0.110138  0.112221  0.114818
Pearson r  0.397039  0.357847  0.322897  0.283393
QLIKE      0.407426  0.439176  0.463336  0.486015
R2         0.004401 -0.018628 -0.058046 -0.083348
RMSE       0.213172  0.213587  0.216649  0.217942

=== EURUSD | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.084093  0.090159  0.094697  0.099880
Pearson r  0.614829  0.538825  0.505370  0.462240
QLIKE      0.410261  0.425955  0.424235  0.488096
R2         0.352545  0.271619  0.233426  0.176566
RMSE       0.171907  0.180612  0.184408  0.190008

=== EURUSD | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.085509  0.090961  0.098495  0.095458
Pearson r  0.646411  0.558189  0.487512  0.444499
QLIKE      0.399726  0.438345  0.510281  0.511468
R2         0.401153  0.297512  0.190665  0.177578
RMSE       0.165328  0.177372  0.189482  0.189891

=== EURUSD | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.088861  0.094690  0.100409  0.101710
Pearson r  0.561921  0.520609  0.483216  0.451376
QLIKE      0.487990  0.438364  0.451981  0.483321
R2         0.306227  0.244793  0.193126  0.158975
RMSE       0.177950  0.183907  0.189194  0.192027

=== AAPL | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.244934  2.255908  2.259492  2.264942
Pearson r  0.037045  0.050060  0.053776  0.022402
QLIKE      7.146105  7.189652  7.200451  7.186950
R2        -2.334505 -2.228654 -2.140761 -2.072988
RMSE       3.204341  3.162423  3.123894  3.099011

=== AAPL | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.440809  2.552977  2.581058  2.612338
Pearson r  0.230568  0.197919  0.149980  0.106349
QLIKE      0.533305  0.555028  0.558112  0.558632
R2        -0.017570 -0.019944 -0.020692 -0.021553
RMSE       8.718175  9.027910  9.068595  9.091777

=== AAPL | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.092918  2.424971  2.503462  2.518576
Pearson r  0.425086  0.345513  0.251507  0.185200
QLIKE      0.278484  0.403303  0.426101  0.476400
R2         0.154880  0.026756  0.012712 -0.024511
RMSE       7.945159  8.818807  8.918966  9.104931

=== AAPL | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.381797  2.396292  2.724439  2.683451
Pearson r  0.450763  0.397629  0.188058  0.075199
QLIKE      0.533954  0.359122  0.445717  0.491763
R2        -0.026858  0.116960 -0.057191 -0.066081
RMSE       8.757876  8.400193  9.229315  9.287809

=== AAPL | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.133557  2.490259  2.587152  2.561629
Pearson r  0.467360  0.354377  0.192889  0.153891
QLIKE      0.288490  0.368075  0.456740  0.484823
R2         0.183208  0.110879 -0.000617 -0.023261
RMSE       7.810869  8.429067  8.978971  9.099373

=== MSFT | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        1.786189  1.798916  1.807977  1.810601
Pearson r  0.062285  0.050124  0.041701  0.048946
QLIKE      7.146787  7.217582  7.243873  7.203543
R2        -1.374047 -1.395942 -1.402882 -1.358412
RMSE       2.192145  2.209159  2.225826  2.235479

=== MSFT | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        1.931308  2.031079  2.074009  2.139999
Pearson r  0.225590  0.063310  0.016352  0.002562
QLIKE      0.415285  0.467463  0.489862  0.504887
R2         0.016365 -0.023119 -0.033173 -0.037122
RMSE       6.647700  6.779962  6.813446  7.032386

=== MSFT | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.059900  2.102978  2.127596  2.177952
Pearson r  0.131766 -0.008312 -0.032547 -0.022945
QLIKE      0.488507  0.496964  0.502815  0.503067
R2         0.002540 -0.026013 -0.031073 -0.027268
RMSE       6.694253  6.789545  6.806518  6.998898

=== MSFT | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        1.794123  1.995786  1.999023  2.161636
Pearson r  0.306164  0.177077  0.151018  0.077415
QLIKE      0.352571  0.386446  0.429503  0.504174
R2         0.070195 -0.005889 -0.008239 -0.028414
RMSE       6.463240  6.722631  6.730728  7.002798

=== MSFT | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.170486  2.076898  2.117085  2.182670
Pearson r  0.089911  0.006929  0.006607  0.002986
QLIKE      0.461403  0.496065  0.494443  0.505668
R2        -0.000529 -0.032200 -0.027595 -0.026518
RMSE       6.704543  6.809986  6.795027  6.996340

=== GE | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        3.633551  3.566507  3.535873  3.492241
Pearson r  0.004758  0.031238  0.014892  0.002262
QLIKE      7.128652  7.134824  7.128936  7.074895
R2        -3.263633 -2.819007 -2.602853 -2.377670
RMSE       4.848706  4.593790  4.478415  4.346271

=== GE | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.965051   4.962709   4.971934   4.985579
Pearson r  -0.028817  -0.017652  -0.010269  -0.006401
QLIKE       0.685489   0.682444   0.679309   0.675004
R2         -0.001451  -0.001453  -0.001492  -0.001500
RMSE       27.446072  27.444522  27.447080  27.447914

=== GE | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.165101   4.162069   4.398488   4.299499
Pearson r   0.021352   0.009339   0.004121  -0.008817
QLIKE       0.707827   0.714538   0.751861   0.710987
R2         -0.014899  -0.013084  -0.015871  -0.011972
RMSE       27.629740  27.603435  27.643412  27.591037

=== GE | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.868009   4.076695   4.529892   4.529903
Pearson r   0.057023   0.054758   0.017229   0.004986
QLIKE       0.684371   0.653308   0.683444   0.703472
R2         -0.001841  -0.007966  -0.037158  -0.012408
RMSE       27.451418  27.533626  27.931529  27.596981

=== GE | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.312359   4.216504   4.366981   4.661683
Pearson r   0.020144   0.024821  -0.002464  -0.002972
QLIKE       0.704225   0.693025   0.734319   0.753618
R2         -0.009984  -0.014046  -0.015077  -0.015395
RMSE       27.562751  27.616541  27.632609  27.637670

=== BAC | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.870229  2.931345  2.978968  3.009191
Pearson r -0.032652  0.046543  0.063185  0.066700
QLIKE      7.860950  7.818094  7.802285  7.729510
R2        -3.005962 -2.828464 -2.847093 -2.870120
RMSE       3.402386  3.338518  3.359030  3.376419

=== BAC | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.135931  2.221024  2.247263  2.265229
Pearson r  0.338053  0.132651  0.079439  0.039138
QLIKE      0.347708  0.400563  0.412225  0.416911
R2         0.039809 -0.004133 -0.011983 -0.017090
RMSE       7.606597  7.782393  7.813123  7.831522

=== BAC | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10        H20
MAE        2.322486  2.229797  2.245889   3.150809
Pearson r  0.383914  0.102959  0.032765   0.027081
QLIKE      0.277494  0.416571  0.425780   0.488646
R2         0.090022 -0.010511 -0.017102  -0.939095
RMSE       7.405035  7.807068  7.832858  10.813494

=== BAC | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        1.952279  2.185919  2.228739  2.389312
Pearson r  0.373382  0.232467  0.155889  0.065314
QLIKE      0.241762  0.335168  0.356134  0.411970
R2         0.123545 -0.002525 -0.030704 -0.157433
RMSE       7.267358  7.776159  7.885061  8.354384

=== BAC | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.207774  2.427097  2.471279  2.348198
Pearson r  0.368325  0.169163  0.068570 -0.007246
QLIKE      0.321662  0.408902  0.458190  0.433901
R2         0.077676  0.020946 -0.014267 -0.016783
RMSE       7.455098  7.684592  7.821933  7.830341

=== C | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.924805  2.915682  2.912444  2.899955
Pearson r -0.071509  0.006983  0.024564  0.034844
QLIKE      7.395842  7.370242  7.305151  7.280166
R2        -2.456264 -2.266680 -2.198418 -2.145620
RMSE       3.507115  3.423420  3.399253  3.379488

=== C | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.645889  2.790753  2.844405  2.883661
Pearson r  0.374447  0.210043  0.141742  0.091183
QLIKE      0.346335  0.396539  0.411933  0.418188
R2         0.038077 -0.010547 -0.022949 -0.030104
RMSE       9.257640  9.527135  9.596620  9.633940

=== C | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.699713  2.862133  3.018361  2.868924
Pearson r  0.465585  0.277344  0.161802  0.046229
QLIKE      0.281128  0.369118  0.421271  0.429703
R2         0.203245  0.058570 -0.014295 -0.037031
RMSE       8.425441  9.195553  9.555944  9.666279

=== C | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5        H10        H20
MAE        2.415491  2.835218   3.431802   3.222991
Pearson r  0.507865  0.280032   0.143978   0.084988
QLIKE      0.247028  0.338508   0.374971   0.418580
R2         0.203331  0.006735  -0.509005  -0.134118
RMSE       8.424982  9.445315  11.655656  10.108636

=== C | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.703733  3.033120  2.904410  2.939347
Pearson r  0.548943  0.239157  0.101051  0.044397
QLIKE      0.284079  0.387526  0.423407  0.433286
R2         0.282685  0.037287 -0.022719 -0.032764
RMSE       7.994385  9.298918  9.595545  9.646375

=== BTCUSDT | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE        10.581360  11.462170  12.160214  13.009847
Pearson r   0.028521   0.049892   0.043659   0.026922
QLIKE       7.610465   7.593759   7.602872   7.624235
R2        -22.367818 -25.936163 -31.112685 -41.054064
RMSE       13.109958  14.056496  15.322183  17.526014

=== BTCUSDT | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        4.568197  4.653337  4.697653  4.740776
Pearson r  0.232797  0.095580  0.029499 -0.041871
QLIKE      0.360800  0.374173  0.380413  0.385184
R2         0.010904 -0.008462 -0.016329 -0.023895
RMSE       9.135728  9.223924  9.260948  9.293716

=== BTCUSDT | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        3.931521  4.448533  4.647752  4.842995
Pearson r  0.410540  0.231193  0.123902  0.058660
QLIKE      0.243445  0.347832  0.383631  0.403062
R2         0.112389  0.006071 -0.049880 -0.059379
RMSE       8.654369  9.157219  9.412567  9.453382

=== BTCUSDT | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10        H20
MAE        4.171120  4.231545  4.366575   5.884454
Pearson r  0.346522  0.260723  0.198533   0.101963
QLIKE      0.281291  0.324546  0.357001   0.484802
R2         0.050646  0.023611 -0.041306  -0.221017
RMSE       8.950310  9.076061  9.374053  10.148982

=== BTCUSDT | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        4.187088  4.548116  4.574250  4.601800
Pearson r  0.347905  0.234948  0.189766  0.121917
QLIKE      0.271769  0.336755  0.365160  0.373111
R2         0.023958 -0.082035 -0.076952 -0.108087
RMSE       9.075244  9.554470  9.533149  9.668266

=== GOLD | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.952836  0.955287  0.961594  0.973243
Pearson r -0.016528 -0.003565  0.005137  0.002903
QLIKE      7.512093  7.490822  7.497311  7.532503
R2        -0.956726 -0.951522 -0.955371 -0.975036
RMSE       1.178645  1.179401  1.183407  1.194606

=== GOLD | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.469837  0.480508  0.486717  0.498326
Pearson r  0.317324  0.250319  0.207961  0.129706
QLIKE      0.490178  0.505330  0.519712  0.538747
R2        -0.332768 -0.372561 -0.388762 -0.419045
RMSE       0.756532  0.769159  0.778037  0.792343

=== GOLD | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.288624  0.306847  0.316029  0.342880
Pearson r  0.594212  0.504068  0.499918  0.414966
QLIKE      0.544326  0.443687  0.492770  0.582242
R2         0.310299  0.242446  0.206566  0.108536
RMSE       0.544228  0.571422  0.588087  0.628010

=== GOLD | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.265208  0.282760  0.304010  0.330226
Pearson r  0.608979  0.543527  0.473712  0.368577
QLIKE      0.393908  0.486091  0.541941  0.681796
R2         0.356698  0.251851  0.206356  0.042044
RMSE       0.525603  0.567864  0.588165  0.651010

=== GOLD | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.298556  0.309862  0.316044  0.342462
Pearson r  0.547382  0.490942  0.443987  0.353597
QLIKE      0.367743  0.509587  0.562998  0.556682
R2         0.238052  0.216846  0.146739  0.034796
RMSE       0.572022  0.580997  0.609856  0.653468

=== SP500 | GARCH | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.964632  0.963199  0.958098  0.937416
Pearson r  0.010440  0.025739  0.038548  0.045488
QLIKE      7.309367  7.327925  7.347556  7.333914
R2        -4.540694 -4.020409 -3.512686 -2.796084
RMSE       2.217733  2.112908  2.002501  1.837063

=== SP500 | ITransformer | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.611600  0.643905  0.655040  0.663976
Pearson r  0.386171  0.181329  0.112304  0.054471
QLIKE      0.708717  0.825929  0.895425  0.929563
R2         0.120074  0.006448 -0.019542 -0.042257
RMSE       2.656234  2.822679  2.859553  2.891403

=== SP500 | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.499970  0.790432  0.855580  0.907591
Pearson r  0.654030  0.251395  0.158396  0.107911
QLIKE      0.471918  0.731043  0.889797  0.872193
R2         0.283898 -0.750919 -0.660475 -0.756777
RMSE       2.396241  3.747134  3.649318  3.753873

=== SP500 | Simple_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.469331  0.670513  0.641873  0.690240
Pearson r  0.584546  0.320906  0.248781  0.217409
QLIKE      0.386871  0.721224  0.847337  0.948819
R2         0.258683 -0.212322  0.019493  0.039615
RMSE       2.438065  3.117993  2.804276  2.775518

=== SP500 | Simple_MLP | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.519677  0.614845  0.695073  0.703336
Pearson r  0.615524  0.413420  0.230992  0.166332
QLIKE      0.543671  1.015928  0.943434  0.867437
R2         0.328406  0.157536 -0.020003 -0.019545
RMSE       2.320580  2.599212  2.860199  2.859727
In [161]:
import os

# Ensure the export directory exists (no-op if it is already there).
os.makedirs("results", exist_ok=True)

# Export the Task 1 metrics tables in two formats.
# `outer_horizon="full"` presumably selects the metrics computed over the
# full outer-horizon set [1, 5, 10, 20] — TODO confirm against the
# export_metrics definition (not visible in this chunk).
export_metrics(final_results_store, "results/metrics_1_task_all.csv", outer_horizon="full")
# Last expression: its return dict ({'mode', 'path', 'sections'}) is the
# cell's displayed Out[] value, so this call must stay last.
export_metrics(final_results_store, "results/metrics_1_task_all.txt", outer_horizon="full")
Out[161]:
{'mode': 'text',
 'path': '/Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/results/metrics_1_task_all.txt',
 'sections': 45}
In [162]:
# Render and save the metric-vs-horizon charts for Task 1.
# NOTE(review): passing None for tickers/models/metrics looks like
# "use all available" — confirm against plot_metric_vs_horizon's
# definition (not visible in this chunk).
saved_metric_plots = plot_metric_vs_horizon(
    final_frames,
    tickers=None,          
    models=None,           
    metrics=None,         
    include_full=True,
    save_dir="plots/task_1",
    show=True
)
print("Saved", len(saved_metric_plots), "charts to plots/task_1")


# Plot the H=1 actual-vs-predicted series per ticker/model; verbose=True
# also logs blob shapes and an aligned-series head (see output below).
saved_y_plots = plot_h1_full_from_results(
    final_results_store,
    tickers=None,
    models=None,
    save_dir="plots/task_1/y_plots",
    show=True,
    verbose=True
)
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
Saved 45 charts to plots/task_1
  [EURUSD H=1] Simple_MLP: blob_shape=(757, 1, 2), yt=ok, yp=ok
  [EURUSD H=1] Simple_KAN: blob_shape=(757, 1, 2), yt=ok, yp=ok
  [EURUSD H=1] Simple_LSTM: blob_shape=(757, 1, 2), yt=ok, yp=ok
  [EURUSD H=1] ITransformer: blob_shape=(757, 1, 2), yt=ok, yp=ok
  [EURUSD H=1] GARCH: blob_shape=(758, 1, 2), yt=ok, yp=ok
[EURUSD H=1] actual=YES

[EURUSD H=1] Aligned series head (first 50 of 757 rows):
    Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
  0.739859         0.344704         0.392549          0.597757           0.947548    0.527567
  0.743822         0.445781         0.437255          0.740661           0.599173     0.54225
   1.02363         0.450836         0.505398          0.705366           0.333677    0.560192
 0.0199331         0.024966        0.0334082         0.0312601          0.0533302    0.542041
   0.52303         0.357369         0.462449          0.454133            1.39534    0.523388
  0.355217         0.462683         0.414474          0.585512            0.58706    0.514174
  0.417086         0.452745         0.749411          0.533248            1.32186    0.499552
  0.266624         0.516042         0.653394          0.541946            1.14001    0.490014
  0.312901         0.452725         0.505506          0.442269           0.445612    0.480418
0.00443731        0.0217347        0.0203908           0.02228          0.0256355    0.463381
  0.261753         0.319742         0.504155          0.244658           0.270375    0.450841
  0.788937         0.415738         0.362864          0.281103           0.819658    0.451272
  0.598342          0.49787          0.73489          0.564776            1.04084    0.455597
  0.723261         0.479105         0.549063          0.610733           0.797211    0.448468
  0.341589          0.33334         0.274472           0.65016           0.503877    0.434952
 0.0137579        0.0460323        0.0391602         0.0253707          0.0344898    0.434445
   0.29405         0.298564          0.17252          0.292281           0.451104    0.416651
  0.513047         0.348563         0.319324          0.311541           0.876412    0.414602
  0.252839         0.469047         0.489849          0.463047           0.978693    0.397076
  0.305402         0.337187         0.505978          0.415025           0.737488    0.377731
  0.226156         0.370188         0.166301          0.397258           0.225177    0.374779
0.00963577        0.0277823        0.0236111         0.0179488          0.0226416    0.364494
 0.0784947         0.287787         0.317479          0.199479            0.45876    0.357774
  0.199036         0.342416         0.260903          0.192285           0.492816    0.345893
  0.171632         0.375328         0.214984          0.245775           0.396353    0.317617
  0.201223         0.337522         0.321647          0.248536           0.240288     0.29398
  0.353565          0.27527          0.18525          0.229204           0.339252    0.283016
 0.0670925        0.0176184        0.0116163         0.0137525          0.0314016    0.286678
    0.2403         0.219411          0.27959          0.194458           0.214966    0.255759
  0.443555         0.367755         0.341669          0.274171            0.43017    0.232695
  0.418858         0.471934         0.319654          0.446601            0.58385    0.315411
  0.378402         0.305767         0.304753          0.464049             0.3852     0.32468
  0.644091         0.386463         0.240297          0.437104           0.169225    0.337647
 0.0152947        0.0437088        0.0250701         0.0271955          0.0398959    0.376331
  0.272258          0.26435         0.182403          0.356306           0.285023    0.375311
   0.23458         0.407319         0.303133          0.349715           0.459831    0.380742
  0.249941         0.356184          0.43941          0.363532           0.341011    0.364146
  0.705648         0.453441          0.30452          0.352722           0.344282    0.352639
  0.293732         0.340372         0.165647          0.476151           0.124775    0.367588
0.00988965        0.0148931        0.0140561         0.0200907          0.0414277     0.35363
  0.164063         0.234852         0.173574          0.227632           0.192007    0.336453
  0.414405         0.378224         0.387932          0.229056           0.427337    0.328718
  0.477121         0.320954         0.427589          0.356612           0.384118    0.317037
  0.264156         0.333569         0.531094          0.415628           0.349086    0.300313
  0.210154         0.383308         0.202868          0.349614           0.177211    0.288811
 0.0136265        0.0248596        0.0125404         0.0164581          0.0414452    0.318718
  0.265973         0.257019          0.22965           0.18229           0.227918     0.34367
  0.234507         0.304722         0.326707          0.246448           0.414127     0.35147
  0.284101         0.368278         0.261206          0.314976           0.249094    0.325053
  0.226239         0.273946         0.238938          0.324761           0.266824    0.328159
No description has been provided for this image
  [AAPL H=1] Simple_MLP: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [AAPL H=1] Simple_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [AAPL H=1] Simple_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [AAPL H=1] ITransformer: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [AAPL H=1] GARCH: blob_shape=(405, 1, 2), yt=ok, yp=ok
[AAPL H=1] actual=YES

[AAPL H=1] Aligned series head (first 50 of 404 rows):
  Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
0.934885          1.31932          1.18825           2.16446            2.42068     2.25613
 1.65221          1.36129          1.17478           2.16442            2.42399     2.18043
 3.00982          1.35182          1.22232           2.16444            2.42258     2.06684
 7.17364          1.39072          1.41241           2.16458            2.42461     2.73913
 2.32415          2.30522          2.51576           2.16505            2.42585     3.17974
  1.8112          2.01698          1.96311           2.16512             2.4198     2.99427
 2.83463          1.85423           1.7408           2.16504            2.42037     2.85262
 2.29244          1.94267          1.99217           2.16502            2.42385      2.8749
 1.60917          1.86194           1.8384           2.16497            2.41991     2.80186
  1.9016          1.57238          1.59799           2.16487            2.41823     2.70591
 2.02761          1.52467          1.56333           2.16481             2.4219     2.59906
0.882809           1.7291          1.66265           2.16477            2.42444     2.61229
 1.75996          1.60188          1.39613           2.16466            2.42291     2.50785
 1.93843           1.6561          1.41594           2.16463            2.42397     2.61455
 1.30271          1.61721          1.41695           2.16463            2.42792     2.52174
 1.66381          1.56498          1.32174            2.1646            2.42643     2.40765
  1.7326          1.47644          1.33844           2.16459            2.42518     2.34043
 1.90607          1.44459          1.36064           2.16459            2.42626     2.54542
 2.70629          1.37841          1.30702           2.16461            2.42328     2.46568
 1.98734          1.62479          1.46902           2.16469            2.42522     2.33922
 4.17835          1.48164          1.48119           2.16471            2.42235     2.25764
 2.23484          1.90315          1.99231           2.16489            2.42604      2.2737
  2.5908           1.8621            1.807           2.16491            2.42951     2.19551
 1.38316          1.94774          1.82614           2.16493            2.42298     2.11403
 3.07923          1.73234          1.62413           2.16484            2.42563     2.02952
 2.16603          2.09576          1.86391           2.16487            2.42146     2.04042
 1.08108          1.92821          1.69196           2.16486            2.42662     1.97355
 1.05538          1.54711          1.39432           2.16475             2.4253      1.8765
 1.56663          1.45655          1.36753           2.16465            2.42512     1.82837
 1.20165          1.44119          1.36993            2.1646            2.42613     1.74378
 3.78232          1.34114          1.26677           2.16455            2.42153     1.72905
  1.6213           1.5621          1.60529           2.16471            2.42245     1.64485
 2.03959          1.56189          1.45785           2.16472            2.41814      1.6066
  2.6352          1.57972          1.50498           2.16473            2.42432     1.55211
 1.42274          1.82831           1.7305           2.16477            2.42128     1.46491
   2.873          1.82371          1.58312           2.16473            2.41962     1.52697
 1.51033           2.0945           1.7479           2.16478            2.42555     1.43632
 2.09779          1.92797          1.59163           2.16474            2.42007     1.35289
 4.03363          1.84557           1.6143           2.16473            2.42895      1.4147
 2.80041          2.13607          2.04788           2.16488            2.42278     1.74222
 2.38801          2.04364          1.91588           2.16495            2.42124     1.67716
  2.0338          1.89298          1.75553           2.16495            2.42527     1.66687
 2.19524          1.82104          1.68172            2.1649            2.42699     1.57648
 11.9895           1.8133          1.69822           2.16486            2.42736     1.68032
 3.66056          3.81002          4.04814           2.16561            2.42493     1.81217
  1.3156           3.2519          3.25078           2.16574            2.42666     1.72771
0.963659          2.62364          2.34477            2.1655            2.43195     1.74278
 1.08602          2.19714          2.19123            2.1652            2.42907     1.76822
 1.29415          1.98163          1.89388           2.16494            2.42724     1.68725
0.888571          1.84856          1.59722           2.16477            2.42529     1.60781
No description has been provided for this image
  [MSFT H=1] Simple_MLP: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [MSFT H=1] Simple_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [MSFT H=1] Simple_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [MSFT H=1] ITransformer: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [MSFT H=1] GARCH: blob_shape=(405, 1, 2), yt=ok, yp=ok
[MSFT H=1] actual=YES

[MSFT H=1] Aligned series head (first 50 of 404 rows):
 Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
1.62646          1.73961          1.93734           1.66613            2.11871     2.67696
1.38102          2.43071           1.8169           1.89004            1.99044     2.58327
1.45815          1.59753          1.95379           1.72952            1.86909     2.57634
1.91787          1.84023          1.74501           1.84685            1.95442     2.48749
1.81463          2.68787          2.21117           2.05102            1.98984     2.42536
1.90777          2.19743          1.95498           2.09034             1.9772       2.401
1.13204          2.38266          1.71237           2.17862            2.02099     2.35936
2.16207          1.35764          1.87681           1.77582            2.25256     2.45703
1.60526          2.02502          1.89853           2.15715            1.78992     2.42708
1.36873          2.27203           1.9503            1.9103            1.63877     2.37856
1.54924          1.81255          1.85017           1.82842            1.94209     2.60355
1.38923          1.78413          1.60312           1.83067            1.84541     2.51095
1.71635          1.91094          1.56495           1.73631              1.889     2.42624
7.14713           2.3961          1.78968           1.85113            1.69845     2.59071
2.33716          2.48456          2.01067           4.09917            2.32677     2.50098
2.91188          2.52499          1.84571           2.21298            2.24735     2.45191
2.05814          1.53432          1.88379           2.45571            1.79204     2.42617
2.23013          3.30244          1.75319           2.45042            1.94899     2.46523
 2.9262          3.33622          1.75693            2.5458            1.80381     2.37413
2.44337          2.78813          1.59914           3.02481            2.18518     2.32784
3.72187          2.79149          1.65043           2.75773            1.90665     2.29773
2.39093          2.00911          1.64401           3.57372            2.11741     2.41964
3.38264          1.88123          1.65171           2.81986            2.07522     2.66366
1.96965          2.67584          1.77624           3.34941            1.81315     2.70796
5.97013          3.35572          1.78881           2.53327            2.08852      2.6235
3.19073          3.32474          1.93784           4.27986            2.00598     2.76685
1.62044          2.86234          1.84047           3.49473            2.16547     2.70262
1.86668          3.10489          1.88084           2.19354            1.77528     2.63664
1.46844          1.98705            2.001           2.25943            2.19713     2.65452
1.78506          3.50086          1.66713           2.07797            2.02741      2.5765
2.02385          3.07652          1.87267           2.18156            2.11874     2.54788
2.15305          1.72852          1.98298            2.2954            2.04137     2.54958
3.35866          2.60708          1.92345           2.44389            1.73794     2.45151
4.75959          1.84585          1.61317           3.32509            2.10025     2.38132
2.26205          2.59118          1.61836           4.26256            1.97186     2.27912
 2.5589           2.0442           1.6595           2.73261            1.92347     2.27234
28.4576          2.31255           1.6303           2.67998            2.05848     2.19075
5.29249           3.8194           3.6099           5.11279            2.53918     2.25135
5.58633           3.2335           3.5162           4.08792            2.69083      2.5873
2.95631          2.94772          3.80559           2.34364              2.357     3.13951
 2.9314          2.88633          3.11775           3.07119            2.91461     3.06237
2.15893          3.44504          2.74561            3.4766            2.14656       3.182
2.50397          2.72324          2.68655           2.81498            2.13434      3.0849
2.31466          4.21852          2.86412           2.82111            2.25579     3.19814
 1.9771          1.78674          2.55908           2.72812            1.79123     3.10159
1.16435          2.56281          2.53566           2.46767            2.39854     3.05142
1.01889          3.19063          2.23055           1.94746            2.22295     2.97549
1.35823          2.40673          2.00302           1.76752            1.95855     2.94902
1.42899          2.99753          2.21365           1.80012            2.30735     2.85005
1.02177          2.28681          2.38538           1.80112             1.8933     2.78264
No description has been provided for this image
  [GE H=1] Simple_MLP: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [GE H=1] Simple_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [GE H=1] Simple_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [GE H=1] ITransformer: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [GE H=1] GARCH: blob_shape=(405, 1, 2), yt=ok, yp=ok
[GE H=1] actual=YES

[GE H=1] Aligned series head (first 50 of 404 rows):
  Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
  1.5135           2.5157          1.82701           4.74426            4.90177     3.49396
 1.72313          2.55159          1.85295           4.74426            4.90144     3.41691
 1.67254          2.35463          1.80839           4.74426            4.88897     3.39128
 1.66078          2.33543          1.82691           4.74426            4.90376     3.28505
0.769403          2.43806           1.8188           4.74426            4.89402      3.2313
 1.04217          2.46612          1.78896           4.74426             4.8961     3.17445
 3.65719          2.37674          1.82424           4.74426            4.90225     3.41098
 1.16201          2.48417           1.8419           4.74426            4.90733     3.35891
 4.30547           2.3359          1.81841           4.74426            4.90449     3.34239
 1.37445           2.3859          1.78913           4.74426             4.9037     3.40142
 1.38507          2.36139          1.76857           4.74426            4.90355      3.2851
 1.13075          2.39083          1.76484           4.74426            4.90507     3.26334
 1.22114           2.4184          1.76038           4.74426              4.906     3.19684
 1.99537          2.33616          1.74756           4.74426            4.89769     3.19972
 2.85347          2.38805          1.77607           4.74426            4.89784     3.26661
0.960334          2.37291          1.78551           4.74426            4.91071     3.32725
 2.10725          2.36867            1.769           4.74426            4.89743     3.23501
  2.0909          2.37139          1.76954           4.74426            4.89969     3.27368
 1.68286          2.43166          1.78114           4.74426            4.89575     3.23135
 3.11206           2.4029          1.77335           4.74426            4.90125     3.25283
 2.59462           2.4041           1.7987           4.74426             4.8961     3.32743
 4.00565          2.35743          1.78398           4.74426            4.89996      3.3743
 2.58083          2.40592           1.7918           4.74426            4.90413      3.3348
 1.92091          2.44247          1.78737           4.74426            4.90249     3.31599
 3.17544          2.43321          1.79314           4.74426            4.89897     3.23821
 2.71117          2.49108          1.83302           4.74426            4.90278      3.4706
 1.39353          2.41126          1.80224           4.74426            4.89609     3.44697
 2.01672          2.37411          1.81047           4.74426            4.89689     3.39451
 2.69781          2.36352          1.81523           4.74426            4.90401     3.40086
 2.05652          2.43732          1.80734           4.74426            4.89975     3.46363
 2.32011          2.36978          1.82103           4.74426            4.90351     3.48524
 1.69183          2.40341          1.81571           4.74426            4.90487     3.35579
 3.70289          2.36092          1.82401           4.74426            4.89737     3.27914
 3.22004          2.37268          1.78951           4.74426            4.90397     3.46478
  4.1739          2.41396          1.79158           4.74426            4.89754       3.357
  3.9362          2.40835          1.79842           4.74426            4.89511     3.28149
 20.9567            2.411          1.81224           4.74426            4.90337     3.21886
 4.07076          2.69309           1.9758           4.74426            4.90697     5.02601
 7.35977          2.58242          1.93576           4.74426            4.90033     4.97498
 4.59998           2.6468          1.96365           4.74426            4.90717     4.87581
 3.23707          2.62443          1.99464           4.74426            4.90922     4.83463
 3.27846          2.65764          1.97042           4.74426            4.90432     5.04858
 7.43036          2.60783          1.96397           4.74426            4.90245     4.72501
 2.44681          2.59649          2.00117           4.74426             4.9075     4.49056
 1.00844          2.51208          1.98914           4.74426            4.90105     4.27285
 2.67284          2.53862          1.95182           4.74426            4.90102     4.08314
 1.85009          2.55026          1.95059           4.74426            4.90479     4.22682
 2.73625          2.54788           1.9373           4.74426            4.90338     4.03415
 1.99492           2.6757          1.95216           4.74426            4.90712     4.01786
 1.32531            2.535           1.9329           4.74426            4.90489     3.84376
No description has been provided for this image
  [BAC H=1] Simple_MLP: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [BAC H=1] Simple_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [BAC H=1] Simple_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [BAC H=1] ITransformer: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [BAC H=1] GARCH: blob_shape=(405, 1, 2), yt=ok, yp=ok
[BAC H=1] actual=YES

[BAC H=1] Aligned series head (first 50 of 404 rows):
 Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
2.28991          2.02336          1.73527            1.5972            2.44942     2.73077
2.58025          2.06892          2.12154           2.11232            2.49127     2.75432
1.43685          2.04514          1.95678           2.30313            2.67354     2.74301
1.40532          2.01722           2.1163           1.78249            2.45183     2.65866
1.62484          1.94887          1.56064           1.74315            2.44583     2.61076
2.44658          2.07675          1.89847           1.82515            2.40728     2.59086
1.56274          2.10387          2.08499           2.20721            2.53411     2.49363
2.04251          1.98151          1.82326           1.82682            2.45975     2.87667
1.92693          2.06384          2.02819           2.03837            2.45052     2.61658
 1.4117          1.97268          2.14076           1.99633            2.41124     2.64318
1.18859          1.98168          1.92861           1.75103            2.47713     2.69384
1.43173          2.04341          1.79974           1.63223            2.41634     2.47313
1.53413          2.06611          1.98964           1.71944            2.47113      2.3782
2.43259          2.07033          1.96343           1.76596            2.45636     2.30235
1.83194          2.06666          2.09387           2.17792            2.48818     2.69504
1.89469          2.00668          1.69005           1.94211            2.43554     2.77048
2.37737          1.99361          1.89902           1.96875            2.38906     2.53222
2.50765          2.02966          1.88654           2.19447            2.61292     2.72332
3.17295          2.15411          2.13127            2.2863            2.50527     2.53004
2.17531          2.05204          2.29708            2.5914            2.47628     2.58693
3.21212          2.11462          2.27001           2.20376            2.46161     2.46054
2.57597          1.99568           2.3459           2.63956            2.55376     3.25291
2.49883          2.03455          2.35397            2.4364            2.49827     4.13039
2.39942          1.99811          1.96501           2.40204            2.54944     3.33988
5.25542          2.07806          2.34025           2.35177            2.53058     2.93201
5.78329           2.2962          3.26226           3.26361            2.59309     2.63988
2.47379          2.44071          4.14277           3.67952            2.58013     2.59445
2.97723          2.26007          2.94199           2.67419            2.54209     3.56252
2.52012          2.28593          2.84612           2.84614            2.57675     2.98333
10.1661          2.38063          2.62345           2.60599            2.62695     2.68306
3.21457           3.0891          4.46423           4.34188            2.69702     2.49628
8.54499          2.58295          3.60336           3.22415            2.64541     2.48623
4.49535          3.01345          5.30069           4.64887            2.87203     3.20644
4.60281          3.07994          4.95801           4.13997            2.73257     2.95488
3.86461          3.05331          3.85276           4.04535             2.7279     2.85277
3.30898          2.76997          3.79204           3.73373            2.72473     3.36461
 2.6722          2.80854          3.99353           3.42756            2.75782     3.98841
2.11101          2.39098          3.59044           3.03333            2.55986     3.29871
 4.7242          2.24275          2.54688           2.61789            2.61351      2.9143
3.81735          2.94404          4.33871           3.53741            2.62013     3.39596
2.94351          2.53607          3.77452           3.45136            2.60619     4.76045
2.42652          2.68931          3.13097           3.08957            2.69582     4.46498
2.96869           2.2135          3.19559           2.73903            2.57388      4.6013
2.88004          2.65854          3.27197           2.90853            2.67116     3.68426
7.20205           2.2828           2.7364           2.86048            2.61076     6.28214
3.87685          2.95067          4.56107           4.00392            2.79416     6.06831
2.27628          2.56683          3.99099           3.58366            2.69483     4.53792
2.08444          2.41245          3.28472           2.78562            2.64155     3.65512
3.13418          2.16144          2.46892           2.48914            2.64696     3.19519
3.92102          2.35349          3.09289           2.90845            2.66943     3.03577
No description has been provided for this image
  [C H=1] Simple_MLP: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [C H=1] Simple_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [C H=1] Simple_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [C H=1] ITransformer: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [C H=1] GARCH: blob_shape=(405, 1, 2), yt=ok, yp=ok
[C H=1] actual=YES

[C H=1] Aligned series head (first 50 of 404 rows):
 Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
 1.4361          2.05949          1.81032           1.77662            2.85181     2.58234
 2.4089          2.13617          1.73451            1.8528            2.72087     2.58922
1.70682          1.97474          1.85658           2.10131             2.7283     2.54011
 1.3802          2.03162          1.93442           1.95772            2.53394     2.46833
1.42651           1.8034          1.69791           1.86546            2.74602     2.61937
2.28623          1.60093          1.68659           1.86196            2.66537     2.51988
3.09043          2.00873          2.24761           2.07131            2.53103     2.45639
3.47712          2.28061          2.34615            2.3464            2.77401     3.15532
2.10921          2.43519          2.38761           2.53311            3.04835     3.17288
3.05691          1.79762          2.04992           2.15512            2.50703      3.2325
1.32975           2.1153           2.5041           2.39922             2.5995     3.03437
1.00699          2.21268          2.05861            1.9418            2.76617      2.8002
1.56839          1.82169          1.97584             1.812            2.75113     2.66095
1.79976          1.75211          1.68473           1.90672            2.76134     2.59247
3.40343          1.68992          1.89359           1.96826              2.678     2.64991
2.66085          1.69591           1.8453           2.43565            2.76671      2.8276
3.50115          1.71656          1.84266           2.29401            2.53176     2.68283
2.93494          1.78966          2.00558           2.55749            2.74792      2.8742
3.77812          1.92783          2.16165           2.43403            2.81849     2.72687
2.79098          2.52679           2.7315           2.71071            2.87076     2.89362
2.48685          2.64495          2.61462           2.44295            2.58238     2.73748
1.80238          2.16618          2.35058           2.31291            2.85845     2.76145
4.05301          2.21246           2.2845           2.08715            2.87287     2.82942
  2.247          2.57034          2.37631           2.74863            2.80395       2.695
3.71379          2.05942          2.28896           2.27083            2.91996     2.59339
4.11532           2.6942          2.49875           2.68286            2.72655     2.80695
3.12293           2.4156          2.66076           2.90458             2.8436     2.68858
2.66388          2.33024          2.56119           2.62207            2.85725     2.87006
5.69437          2.06756          2.55189           2.43296             2.7587     2.71265
9.77504          2.88279          3.21622            3.4128             2.8106     2.58721
4.82663          4.88662           4.3103           4.69949            2.90636     2.50811
5.00513          3.39613          4.04973           3.81661            2.88156     2.54929
4.18801          3.81033           4.2351           3.76236            2.85919     2.51673
2.87579          3.23046          4.02031           3.52093            2.92248     2.70154
3.95956          2.92423           3.4419           2.99555            2.86596     2.65958
2.62006          2.66915           3.1512           3.30196            2.80821      2.6259
1.79606           2.3638          2.91654           2.83876            2.84444      2.8674
2.26218          1.96569          2.46923           2.44145            2.85634     2.69901
6.96561          2.25966          2.43855           2.49864            2.81789     2.61602
3.21371          2.98949          3.21443           4.04928            2.83325     2.88442
3.60813          2.43714          2.97473           3.12445            2.91287     3.40004
2.14357          2.80448          3.03632           3.13934            2.91315     3.39642
2.83844          2.47278          2.83792           2.59734            2.84376     3.26715
2.47451          2.11743          2.54071           2.73929            2.75065     2.98448
5.06039          2.53872          2.50641           2.60048            2.88165     4.55295
2.61845          2.86881          2.91839           3.47909            2.88161     4.37708
1.97985          2.38972          2.81659           2.75621             2.9485     3.76624
   2.24          2.37572          2.51994           2.41292            2.70812     3.31658
5.28614          2.22935          2.63937           2.41857            2.80805     3.01539
1.59823          2.78158          3.12055           3.45286            2.91193     2.83713
No description has been provided for this image
  [BTCUSDT H=1] Simple_MLP: blob_shape=(480, 1, 2), yt=ok, yp=ok
  [BTCUSDT H=1] Simple_KAN: blob_shape=(480, 1, 2), yt=ok, yp=ok
  [BTCUSDT H=1] Simple_LSTM: blob_shape=(480, 1, 2), yt=ok, yp=ok
  [BTCUSDT H=1] ITransformer: blob_shape=(480, 1, 2), yt=ok, yp=ok
  [BTCUSDT H=1] GARCH: blob_shape=(481, 1, 2), yt=ok, yp=ok
[BTCUSDT H=1] actual=YES

[BTCUSDT H=1] Aligned series head (first 50 of 480 rows):
  Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
 6.92748          4.22565          4.10645           5.10271            6.41839     15.4974
 1.26781          2.53354          2.39846           5.22677            6.85443     8.51014
0.752504          1.78268          1.95099           3.08665            6.33748     7.38625
 10.0004          1.94661          4.10667           2.65069            6.29678     6.29784
 8.45084          6.71705          10.4203           6.45995            7.01173     25.0203
 5.32244          4.95906           7.3401           5.44647            6.36781     9.79193
 2.80991          3.91033          4.94077           3.87555            6.41213     6.91373
 5.32016          1.93833          2.76152           2.91372            6.26131      6.5617
 1.27538          1.15078         0.976106           4.02572            6.22643     18.5139
  3.3705            1.161          1.47332           2.66683            6.11175     7.57246
 4.30039          2.70718          2.80168           3.09955            6.45395     6.46378
  4.5337          3.63901          4.29719           3.25724            6.30434      11.047
 6.04033          2.61462          2.48211           3.14142            6.37096     7.17486
 4.38553          3.02479          2.99407           3.65192            6.48342     6.85069
 3.19392           2.1905           3.3258           3.06988             6.1683     6.55304
0.828641          1.43063          1.24625            2.7115             6.4729     5.96814
 1.72163          1.34968          1.50251           2.12012             6.0891     6.23868
 3.29458          2.23288          4.67267           2.25291            6.37708     6.89264
 2.52403          2.47752          6.42169           2.63118             6.2018     6.04899
 3.66699           3.9244          4.74214           2.23734            6.42311     6.43847
 3.61632          3.08895          3.06748            2.3893            6.20587     10.8539
 8.21189          3.36716          4.40629           2.36401            6.15734     8.84327
 2.21355           2.0948          1.53782            4.4513            5.83558     16.0553
 2.72723          1.55792           1.4317           2.31031            6.45938     7.62243
 6.91185          2.95095          2.99416           2.32257            6.24184     6.70523
  7.1909          5.44351          4.18534           4.25197            6.57445     12.2698
 6.48986          5.28778          4.08057           4.41681             6.4879     6.69666
 6.82806          4.18417          5.70952           4.04379             6.3712     15.8625
 4.04652          4.48997          4.52335           4.26598            6.32422     7.10354
 3.11243          1.63272          1.44161           3.32494            5.77862     6.58045
  2.5171          2.04086          1.76629           3.07009            5.72366     7.24674
 2.75687          2.45268          3.18061           2.88489            5.99425     6.82247
 6.87132          4.49213          8.17688           2.83494            6.06518     7.08597
 4.43353          4.77557          6.34874           4.35616            6.34793     6.88433
 4.92989          3.85498           5.4101           3.18841            6.61877     6.99391
 3.08011          1.93112          2.61154           3.20835            5.65607     7.55735
 1.10511          1.47487         0.878115           2.66455             6.2181      7.4489
 1.37997          1.34808          1.55578             2.169            5.75637     7.52075
 6.66109          1.86834          2.54087           2.17931            6.08828     6.40363
 13.9372          5.71324          7.84822           4.19853            6.47281     22.1983
 26.0966          9.03072          9.91963           6.71274            6.21498     19.8492
 18.2381          12.6697          14.0678           9.86168            6.31501      61.544
  8.3292          9.65288          10.5323           9.43658            6.88901     14.8119
 2.68908          3.19934          4.82262           6.44326            6.57973     9.30873
 3.12688          2.71336          4.49923           4.63277            6.23692      7.2574
 23.8375          4.86806          8.23276           5.40988             6.3905     8.01182
 50.1271          12.6143          22.3594            11.946            6.82711     44.5863
 25.8546          19.1892            33.43           20.0921            7.33057     46.8263
 9.41359          16.2393          18.7977           15.5296            7.02566     18.0017
 16.7212          8.70803          10.8569           9.34476            7.12477     7.66735
No description has been provided for this image
  [GOLD H=1] Simple_MLP: blob_shape=(1107, 1, 2), yt=ok, yp=ok
  [GOLD H=1] Simple_KAN: blob_shape=(1107, 1, 2), yt=ok, yp=ok
  [GOLD H=1] Simple_LSTM: blob_shape=(1107, 1, 2), yt=ok, yp=ok
  [GOLD H=1] ITransformer: blob_shape=(1107, 1, 2), yt=ok, yp=ok
  [GOLD H=1] GARCH: blob_shape=(1108, 1, 2), yt=ok, yp=ok
[GOLD H=1] actual=YES

[GOLD H=1] Aligned series head (first 50 of 1107 rows):
   Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
 0.983207         0.422219          0.23986          0.571752           0.208339    0.685881
0.0725466         0.431938        0.0207378         0.0498391           0.236039    0.680604
 0.365228         0.428538         0.378341          0.539055           0.180121    0.658805
 0.675363         0.604657         0.674003          0.596529           0.242638    0.639727
  1.19577         0.382046         0.752431           0.74542           0.165433    0.638002
 0.396614         0.619541         0.533796          0.858823           0.306933    0.723233
 0.569428         0.414257         0.272703          0.674106           0.329168    0.718785
0.0394832         0.581621         0.348653         0.0488642           0.366324    0.779457
 0.540289         0.394882         0.295553          0.460947           0.231783     0.76829
 0.645375         0.431192         0.491561          0.594452           0.215055    0.765542
 0.596069         0.567287         0.517611          0.699126           0.256235     0.74924
 0.496734         0.457365         0.507105          0.632185           0.241731    0.743014
 0.986594         0.380398         0.453661          0.546185           0.186387    0.730231
0.0170706        0.0477885        0.0228274         0.0441966           0.169985    0.729788
 0.388559         0.503325         0.338773          0.464354           0.233847    0.727037
 0.616087         0.494338         0.524899          0.547338           0.195596    0.738527
 0.603893         0.511328         0.591502          0.696161           0.253194    0.758962
 0.739671          0.45313         0.521339           0.64251           0.220937    0.764986
 0.832053         0.475963         0.474508          0.647189           0.220221    0.773268
0.0110286        0.0358669        0.0225909         0.0456166           0.196558    0.810121
 0.506412         0.491655          0.33213           0.47311            0.20611    0.786416
 0.392005         0.441145         0.473468          0.607242           0.236699    0.655122
  0.80787         0.541264         0.520275          0.664098           0.228026    0.800724
 0.535457         0.472579         0.604974          0.669592           0.233856    0.829303
 0.837414         0.496366         0.439857          0.590027           0.206479    0.858639
0.0220968        0.0442664        0.0173706         0.0427142           0.181072     0.89187
 0.498523         0.488293         0.390587          0.449317           0.236574    0.823867
 0.472977         0.427078         0.399705          0.587656           0.207432    0.805548
  1.61389         0.505716         0.470363          0.680659           0.234607    0.835249
 0.641823         0.555861         0.971117          0.892287           0.228295    0.859374
 0.600193         0.529455          0.56877          0.703771           0.248334    0.894679
0.0226182        0.0747826        0.0285722         0.0438857           0.205992    0.837805
 0.411008         0.537367         0.422029          0.465519            0.19087    0.787663
 0.743739         0.432418         0.493226          0.554816           0.232542    0.807656
 0.369762         0.578067         0.607623          0.708233           0.262809    0.824197
  0.57285         0.448126         0.568941          0.572638            0.26489    0.801787
  1.20635         0.417208         0.533148          0.561875           0.196172    0.824994
0.0659951        0.0432381        0.0272632         0.0460365           0.184604    0.804215
  1.33651         0.485215          0.40125          0.506112           0.233862    0.816732
 0.970258         0.556679         0.915238          0.920184           0.228134    0.996608
 0.553171         0.661553         0.771861          0.950451            0.27655     1.00121
 0.193903         0.509757         0.660033          0.805476           0.243418    0.919109
  1.26963         0.435644         0.442191          0.541587           0.286094    0.911807
 0.222961         0.102008        0.0309558         0.0638977            0.20381    0.873841
 0.483934         0.522787         0.520647          0.710576           0.248931    0.875288
   1.3403         0.428168         0.564224          0.718952           0.284976    0.834209
  1.06602          0.74433         0.881867          0.876445           0.249181     0.86112
 0.692459         0.242818         0.517915          0.902223            0.23706    0.886151
 0.757949         0.521484         0.435654            0.7749           0.197776    0.856254
0.0295473         0.114105        0.0548944         0.0586249           0.238353    0.835811
No description has been provided for this image
  [SP500 H=1] Simple_MLP: blob_shape=(694, 1, 2), yt=ok, yp=ok
  [SP500 H=1] Simple_KAN: blob_shape=(694, 1, 2), yt=ok, yp=ok
  [SP500 H=1] Simple_LSTM: blob_shape=(694, 1, 2), yt=ok, yp=ok
  [SP500 H=1] ITransformer: blob_shape=(694, 1, 2), yt=ok, yp=ok
  [SP500 H=1] GARCH: blob_shape=(695, 1, 2), yt=ok, yp=ok
[SP500 H=1] actual=YES

[SP500 H=1] Aligned series head (first 50 of 694 rows):
   Actual  Simple_MLP_pred  Simple_KAN_pred  Simple_LSTM_pred  ITransformer_pred  GARCH_pred
   1.0351          1.09275          1.18393           1.55687           0.394359    0.799467
  1.03491          1.80206           1.3492           1.08629           0.422129    0.796043
 0.818223          1.54565           1.0302          0.897907           0.396443    0.809952
0.0217497         0.045146           0.3984         0.0767051           0.365226    0.757815
 0.502587         0.269231         0.582129          0.760349           0.354194    0.704544
  2.13502         0.890721         0.512209          0.672992           0.347545    0.820297
 0.903485          2.20735          1.04372           1.57803           0.377749    0.765318
 0.898743         0.903324          1.13008          0.958117            0.43054     0.72972
 0.904151         0.568081         0.737492          0.823811           0.385761    0.906819
0.0405714        0.0321466           0.2803         0.0693383           0.311338    0.852453
 0.113815         0.334695         0.628761          0.804945           0.336537    0.796007
  0.60793         0.820503         0.793502          0.402075           0.326805    0.742223
  0.92855         0.655194         0.706013          0.523316           0.391597    0.894725
 0.794209         0.699873         0.781745          0.788148           0.381961    0.834901
 0.910428         0.478748         0.496577          0.624998           0.350679    0.782076
0.0104435        0.0656389         0.201243         0.0478977           0.334912    0.795224
 0.610551          0.16914         0.401153          0.684136             0.3342    0.741294
 0.532918         0.841029         0.945009          0.639523           0.366766    0.708376
 0.832645         0.635857         0.881733          0.668783            0.34164    0.705748
 0.902649         0.898833         0.879433          0.737364           0.361889    0.666399
 0.540703         0.551887         0.660904           0.72802           0.315444    0.646531
0.0320152        0.0265908         0.119123         0.0430875           0.314461    0.854385
  0.44739         0.510448         0.404193          0.412501           0.333362    0.805102
  1.17379         0.574764         0.761066          0.489611           0.350494    0.763888
 0.784725          1.34082         0.875697           1.01457           0.367478    0.887074
  1.19858          1.01257         0.906342            0.7192           0.338609    0.836034
   3.1834         0.551616         0.599364           0.97001           0.330019     1.06951
  1.95774        0.0475945         0.500037          0.213057           0.323595     1.11032
  5.83359          1.33327          1.14272           1.69194           0.340549     1.15077
  1.91438          4.60719          1.89317           3.38312           0.490163     1.17816
  2.82295          3.51623          1.78432           2.36367           0.427311     1.27191
  1.97836          3.05436          1.80666           2.40322           0.485227     1.22346
  1.67848          1.67403          1.72195            1.6257           0.423387     1.39471
 0.231827         0.581981          1.21457           1.58846           0.422853     1.37889
  1.99306          1.20666          1.47822           0.88852           0.448867     1.31051
 0.595525          1.55252          1.33087           1.35574           0.417849     1.23252
   2.0958          1.25003          1.16848           1.06767           0.429391      1.2776
  1.44715          1.61855          1.19434           1.61631           0.432833     1.36154
   1.8779          1.32908          1.15891           1.09801           0.400154     1.27896
0.0836623        0.0554083         0.599128          0.190105           0.366675     1.21785
 0.723351          1.22303          1.27175           1.21298           0.426686     1.15966
 0.463055         0.791861         0.843901           1.00817           0.368699     1.09123
 0.331959          0.93101          1.08082          0.923874           0.430165     1.02859
 0.355102         0.592384         0.816007          0.567945           0.356518     1.04923
 0.359515         0.699186         0.686743          0.237356           0.388032     1.03438
0.0698865         0.155076         0.120673         0.0445153           0.355785     1.09259
 0.418851         0.417567         0.626095           0.38103           0.340955     1.02865
 0.479569         0.614998         0.507444          0.552917           0.338702    0.988313
 0.468942         0.757813         0.605931          0.603309           0.416061    0.937865
 0.278331         0.442242         0.546351          0.496965           0.319164    0.897153
No description has been provided for this image

Results on models with more inputs¶

In [823]:
# Load the pickled data dictionary variant 11 (models with more inputs).
# NOTE(review): pickle.load runs arbitrary code — only load trusted files.
load_data_object_11_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_11.pkl")
with open(load_data_object_11_file_path, "rb") as f:
    structured_data_dict_11 = pickle.load(f)
print("Data dictionary 11 loaded successfully.")

# Per-ticker input/target maps consumed by run_all_models_for_all below.
X_price_map, X_time_map, y_map = {}, {}, {}

missing = []  # tickers absent from the loaded dictionary
for t in tickers:
    if t not in structured_data_dict_11:
        missing.append(t)
        continue

    entry = structured_data_dict_11[t]

    price_arr = entry.get("X_other")   # price/feature inputs
    time_arr = entry.get("X_time")     # optional time-encoding inputs
    target_arr = entry.get("y")        # targets

    # X_other and y are mandatory; report exactly which one(s) are absent.
    absent = [name for name, val in (("X_other", price_arr), ("y", target_arr)) if val is None]
    if absent:
        print(f"[WARN] {t}: missing {' and '.join(absent)} → skipping.")
        continue

    price_arr = np.asarray(price_arr, dtype=float)
    time_arr = np.asarray(time_arr, dtype=float) if time_arr is not None else None
    target_arr = np.asarray(target_arr, dtype=float)

    # Expect 3-D blobs; presumably (samples, window, features) — TODO confirm upstream.
    if price_arr.ndim != 3 or target_arr.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_other={price_arr.shape}, y={target_arr.shape} → skipping.")
        continue
    if time_arr is not None and time_arr.ndim != 3:
        # Time inputs are optional, so a malformed blob is dropped rather than fatal.
        print(f"[WARN] {t}: unexpected dims X_time={time_arr.shape} → setting to None.")
        time_arr = None

    X_price_map[t] = price_arr
    X_time_map[t] = time_arr
    y_map[t] = target_arr

if missing:
    print(f"[INFO] Missing tickers in data dict (skipped): {missing}")


# Accumulator for per-ticker/per-horizon results.
multi_input_results_store = {}


MULTI_INPUT_BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "multi_input_results_saved_object")
os.makedirs(MULTI_INPUT_BASE_SAVE_DIR, exist_ok=True)


# NOTE(review): name kept as-is (typo "mutiple") — later cells may reference it.
mutiple_inputs_models = ["Simple_KAN"]

_ = run_all_models_for_all(
    tickers=final_tickers,
    horizons=final_horizons,
    model_list=mutiple_inputs_models,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=MULTI_INPUT_BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS_MERGED,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=None,
    results_store=multi_input_results_store,
    overwrite=False
)
Data dictionary 11 loaded successfully.

=== AAPL | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -10.367763820153055
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.13326641523511
  Min value:  -2.354189421011421
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5738558883507405
  Min value:  -10.367763820153055
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.906984328061974
  Min value:  -1.6295009124362931
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.9402408075062174
  Min value:  -10.367763820153055
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8950185841626346
  Min value:  -2.119931409321887
Epoch 1: Train mse = 0.9633 | Val mse = 0.6433
Epoch 2: Train mse = 0.7840 | Val mse = 0.5237
Epoch 3: Train mse = 0.6934 | Val mse = 1.0252
Epoch 4: Train mse = 0.6236 | Val mse = 0.8990
Epoch 5: Train mse = 0.5647 | Val mse = 1.3106
Epoch 6: Train mse = 0.5361 | Val mse = 1.5061
Epoch 7: Train mse = 0.4902 | Val mse = 1.6780
Epoch 8: Train mse = 0.4502 | Val mse = 2.0699
Epoch 9: Train mse = 0.4115 | Val mse = 2.2300
Epoch 10: Train mse = 0.3920 | Val mse = 2.6834
Epoch 11: Train mse = 0.3711 | Val mse = 2.2590
Epoch 12: Train mse = 0.3381 | Val mse = 2.5199
Epoch 13: Train mse = 0.3218 | Val mse = 3.0646
Epoch 14: Train mse = 0.2947 | Val mse = 2.8705
Epoch 15: Train mse = 0.2788 | Val mse = 2.7621
Epoch 16: Train mse = 0.2621 | Val mse = 2.9090
Epoch 17: Train mse = 0.2421 | Val mse = 3.5889
Epoch 18: Train mse = 0.2289 | Val mse = 3.4238
Epoch 19: Train mse = 0.2233 | Val mse = 3.7965
Epoch 20: Train mse = 0.2078 | Val mse = 3.3491
Epoch 21: Train mse = 0.1950 | Val mse = 3.5481
Epoch 22: Train mse = 0.1733 | Val mse = 3.4025
Epoch 23: Train mse = 0.1689 | Val mse = 3.9105
Epoch 24: Train mse = 0.1560 | Val mse = 4.1867
Epoch 25: Train mse = 0.1427 | Val mse = 4.6966
Epoch 26: Train mse = 0.1383 | Val mse = 4.7542
Epoch 27: Train mse = 0.1436 | Val mse = 4.5108
Epoch 28: Train mse = 0.1210 | Val mse = 3.8529
Epoch 29: Train mse = 0.1113 | Val mse = 3.4801
Epoch 30: Train mse = 0.0962 | Val mse = 3.6740
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.46251029
1 day(s) RMSE                      : 8.91392386
1 day(s) R2                        : -0.06377748
1 day(s) Pearson r                 : -0.09094064
1 day(s) QLIKE                     : 0.57576874
full horizon MAE                   : 2.46251029
full horizon RMSE                  : 8.91392386
full horizon R2                    : -0.06377748
full horizon Pearson r             : -0.09094064
full horizon QLIKE                 : 0.57576874

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/AAPL/Simple_KAN_H1.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.15621, max=2.02448

=== AAPL | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -10.367763820153055
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.140231129276094
  Min value:  -2.358314580069225
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5738558883507405
  Min value:  -10.367763820153055
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.907938252663739
  Min value:  -1.632926464574096
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.9402408075062174
  Min value:  -10.367763820153055
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.896926346992255
  Min value:  -2.123830418076347
Epoch 1: Train mse = 0.9487 | Val mse = 0.9255
Epoch 2: Train mse = 0.7751 | Val mse = 0.4594
Epoch 3: Train mse = 0.7121 | Val mse = 1.2533
Epoch 4: Train mse = 0.6457 | Val mse = 1.2468
Epoch 5: Train mse = 0.5652 | Val mse = 1.5448
Epoch 6: Train mse = 0.5087 | Val mse = 2.1187
Epoch 7: Train mse = 0.4671 | Val mse = 2.8582
Epoch 8: Train mse = 0.4413 | Val mse = 2.4983
Epoch 9: Train mse = 0.4452 | Val mse = 1.8795
Epoch 10: Train mse = 0.4194 | Val mse = 2.3950
Epoch 11: Train mse = 0.4043 | Val mse = 2.3962
Epoch 12: Train mse = 0.3834 | Val mse = 1.9825
Epoch 13: Train mse = 0.3717 | Val mse = 2.5739
Epoch 14: Train mse = 0.3576 | Val mse = 2.3845
Epoch 15: Train mse = 0.3487 | Val mse = 2.3354
Epoch 16: Train mse = 0.3379 | Val mse = 2.6594
Epoch 17: Train mse = 0.3321 | Val mse = 2.2352
Epoch 18: Train mse = 0.3302 | Val mse = 2.3320
Epoch 19: Train mse = 0.3207 | Val mse = 2.5188
Epoch 20: Train mse = 0.3164 | Val mse = 2.5334
Epoch 21: Train mse = 0.3035 | Val mse = 2.2577
Epoch 22: Train mse = 0.2929 | Val mse = 2.4497
Epoch 23: Train mse = 0.2912 | Val mse = 2.2135
Epoch 24: Train mse = 0.2831 | Val mse = 2.0641
Epoch 25: Train mse = 0.2717 | Val mse = 2.0751
Epoch 26: Train mse = 0.2629 | Val mse = 2.2712
Epoch 27: Train mse = 0.2542 | Val mse = 2.1547
Epoch 28: Train mse = 0.2444 | Val mse = 2.3112
Epoch 29: Train mse = 0.2377 | Val mse = 2.2295
Epoch 30: Train mse = 0.2297 | Val mse = 2.3981
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.42054486
1 day(s) RMSE                      : 8.88848184
1 day(s) R2                        : -0.05771370
1 day(s) Pearson r                 : 0.02753822
1 day(s) QLIKE                     : 0.53149894
3 day(s) MAE                       : 2.48963717
3 day(s) RMSE                      : 9.13267811
3 day(s) R2                        : -0.05539978
3 day(s) Pearson r                 : 0.02733509
3 day(s) QLIKE                     : 0.54992576
5 day(s) MAE                       : 2.52338940
5 day(s) RMSE                      : 9.19584237
5 day(s) R2                        : -0.05824183
5 day(s) Pearson r                 : 0.00822495
5 day(s) QLIKE                     : 0.55751983
full horizon MAE                   : 2.52338940
full horizon RMSE                  : 9.19584237
full horizon R2                    : -0.05824183
full horizon Pearson r             : 0.00822495
full horizon QLIKE                 : 0.55751983

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/AAPL/Simple_KAN_H5.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.20653, max=1.82501

=== AAPL | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -10.367763820153055
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.14985814599714
  Min value:  -2.3634631558377808
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5738558883507405
  Min value:  -10.367763820153055
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.909556794039435
  Min value:  -1.6371429190585964
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.9402408075062174
  Min value:  -10.367763820153055
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8998157347365474
  Min value:  -2.1286776824868885
Epoch 1: Train mse = 0.9547 | Val mse = 1.1972
Epoch 2: Train mse = 0.8281 | Val mse = 0.4714
Epoch 3: Train mse = 0.7662 | Val mse = 0.9059
Epoch 4: Train mse = 0.6882 | Val mse = 1.6323
Epoch 5: Train mse = 0.6384 | Val mse = 1.7941
Epoch 6: Train mse = 0.5806 | Val mse = 1.4152
Epoch 7: Train mse = 0.5398 | Val mse = 1.7945
Epoch 8: Train mse = 0.5054 | Val mse = 1.8864
Epoch 9: Train mse = 0.4825 | Val mse = 2.4531
Epoch 10: Train mse = 0.4532 | Val mse = 2.5392
Epoch 11: Train mse = 0.4356 | Val mse = 2.5550
Epoch 12: Train mse = 0.4249 | Val mse = 2.4789
Epoch 13: Train mse = 0.4135 | Val mse = 2.8994
Epoch 14: Train mse = 0.4054 | Val mse = 2.7287
Epoch 15: Train mse = 0.3949 | Val mse = 2.6257
Epoch 16: Train mse = 0.3810 | Val mse = 2.3369
Epoch 17: Train mse = 0.3769 | Val mse = 2.4368
Epoch 18: Train mse = 0.3644 | Val mse = 2.4593
Epoch 19: Train mse = 0.3490 | Val mse = 2.5198
Epoch 20: Train mse = 0.3404 | Val mse = 2.3580
Epoch 21: Train mse = 0.3291 | Val mse = 2.3953
Epoch 22: Train mse = 0.3214 | Val mse = 2.4326
Epoch 23: Train mse = 0.3189 | Val mse = 2.6236
Epoch 24: Train mse = 0.3098 | Val mse = 2.4413
Epoch 25: Train mse = 0.3020 | Val mse = 2.3883
Epoch 26: Train mse = 0.2987 | Val mse = 2.2625
Epoch 27: Train mse = 0.2929 | Val mse = 2.3990
Epoch 28: Train mse = 0.2877 | Val mse = 2.3672
Epoch 29: Train mse = 0.2800 | Val mse = 2.2357
Epoch 30: Train mse = 0.2748 | Val mse = 2.3463
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.45663669
1 day(s) RMSE                      : 8.90652078
1 day(s) R2                        : -0.06201126
1 day(s) Pearson r                 : -0.02446050
1 day(s) QLIKE                     : 0.54500584
3 day(s) MAE                       : 2.50791224
3 day(s) RMSE                      : 9.13810199
3 day(s) R2                        : -0.05665375
3 day(s) Pearson r                 : -0.01859557
3 day(s) QLIKE                     : 0.56133092
5 day(s) MAE                       : 2.52533396
5 day(s) RMSE                      : 9.18665284
5 day(s) R2                        : -0.05612786
5 day(s) Pearson r                 : -0.02795649
5 day(s) QLIKE                     : 0.56631284
10 day(s) MAE                      : 2.56554233
10 day(s) RMSE                     : 9.24094562
10 day(s) R2                       : -0.05985769
10 day(s) Pearson r                : -0.04688197
10 day(s) QLIKE                    : 0.57715424
full horizon MAE                   : 2.56554233
full horizon RMSE                  : 9.24094562
full horizon R2                    : -0.05985769
full horizon Pearson r             : -0.04688197
full horizon QLIKE                 : 0.57715424

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/AAPL/Simple_KAN_H10.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.06221, max=1.94837

=== AAPL | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714034
  Min value:  -10.367763820153055
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.17404540383974
  Min value:  -2.37751047680313
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5738558883507405
  Min value:  -10.367763820153055
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9130206404899854
  Min value:  -1.6487782037743786
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.9402408075062174
  Min value:  -10.367763820153055
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.906568131251477
  Min value:  -2.14194530457574
Epoch 1: Train mse = 0.9645 | Val mse = 1.1664
Epoch 2: Train mse = 0.8362 | Val mse = 0.5216
Epoch 3: Train mse = 0.7714 | Val mse = 0.8549
Epoch 4: Train mse = 0.7125 | Val mse = 1.5340
Epoch 5: Train mse = 0.6542 | Val mse = 1.2953
Epoch 6: Train mse = 0.6011 | Val mse = 1.9058
Epoch 7: Train mse = 0.5590 | Val mse = 2.4031
Epoch 8: Train mse = 0.5326 | Val mse = 2.6154
Epoch 9: Train mse = 0.5270 | Val mse = 2.1960
Epoch 10: Train mse = 0.5055 | Val mse = 2.6145
Epoch 11: Train mse = 0.4870 | Val mse = 2.3444
Epoch 12: Train mse = 0.4686 | Val mse = 2.1335
Epoch 13: Train mse = 0.4490 | Val mse = 2.4713
Epoch 14: Train mse = 0.4331 | Val mse = 2.1391
Epoch 15: Train mse = 0.4198 | Val mse = 2.1328
Epoch 16: Train mse = 0.4052 | Val mse = 2.1963
Epoch 17: Train mse = 0.4027 | Val mse = 1.9930
Epoch 18: Train mse = 0.3914 | Val mse = 1.9027
Epoch 19: Train mse = 0.3854 | Val mse = 1.9867
Epoch 20: Train mse = 0.3787 | Val mse = 1.9238
Epoch 21: Train mse = 0.3702 | Val mse = 1.8438
Epoch 22: Train mse = 0.3587 | Val mse = 2.0834
Epoch 23: Train mse = 0.3536 | Val mse = 1.8663
Epoch 24: Train mse = 0.3429 | Val mse = 2.1530
Epoch 25: Train mse = 0.3356 | Val mse = 1.9702
Epoch 26: Train mse = 0.3320 | Val mse = 2.0708
Epoch 27: Train mse = 0.3304 | Val mse = 2.1160
Epoch 28: Train mse = 0.3254 | Val mse = 2.3419
Epoch 29: Train mse = 0.3175 | Val mse = 2.0250
Epoch 30: Train mse = 0.3120 | Val mse = 2.1874
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.41009828
1 day(s) RMSE                      : 8.86153059
1 day(s) R2                        : -0.05130912
1 day(s) Pearson r                 : -0.07825994
1 day(s) QLIKE                     : 0.55783030
3 day(s) MAE                       : 2.49315904
3 day(s) RMSE                      : 9.12224975
3 day(s) R2                        : -0.05299089
3 day(s) Pearson r                 : -0.06100949
3 day(s) QLIKE                     : 0.57087987
5 day(s) MAE                       : 2.51891002
5 day(s) RMSE                      : 9.17800031
5 day(s) R2                        : -0.05413935
5 day(s) Pearson r                 : -0.07083658
5 day(s) QLIKE                     : 0.57605450
10 day(s) MAE                      : 2.55767078
10 day(s) RMSE                     : 9.22998776
10 day(s) R2                       : -0.05734564
10 day(s) Pearson r                : -0.07705607
10 day(s) QLIKE                    : 0.58552514
20 day(s) MAE                      : 2.60693213
20 day(s) RMSE                     : 9.26869046
20 day(s) R2                       : -0.06169602
20 day(s) Pearson r                : -0.10400603
20 day(s) QLIKE                    : 0.59590654
full horizon MAE                   : 2.60693213
full horizon RMSE                  : 9.26869046
full horizon R2                    : -0.06169602
full horizon Pearson r             : -0.10400603
full horizon QLIKE                 : 0.59590654

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/AAPL/Simple_KAN_H20.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.16321, max=2.06414

=== MSFT | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.662699184664
  Min value:  -6.772192238786855
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2181198470171735
  Min value:  -2.80514498305857
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.716447710866394
  Min value:  -4.319177175086081
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.894759677334685
  Min value:  -1.3150487901568384
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.94429083559171
  Min value:  -6.772192238873022
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930619914036178
  Min value:  -2.329935051243564
Epoch 1: Train mse = 0.9464 | Val mse = 2.5508
Epoch 2: Train mse = 0.7457 | Val mse = 0.5342
Epoch 3: Train mse = 0.6407 | Val mse = 0.5153
Epoch 4: Train mse = 0.5298 | Val mse = 0.8944
Epoch 5: Train mse = 0.4840 | Val mse = 0.6620
Epoch 6: Train mse = 0.4487 | Val mse = 0.8374
Epoch 7: Train mse = 0.4112 | Val mse = 0.9100
Epoch 8: Train mse = 0.3752 | Val mse = 0.7831
Epoch 9: Train mse = 0.3474 | Val mse = 1.2324
Epoch 10: Train mse = 0.3080 | Val mse = 1.4500
Epoch 11: Train mse = 0.2795 | Val mse = 1.2074
Epoch 12: Train mse = 0.2580 | Val mse = 1.3631
Epoch 13: Train mse = 0.2404 | Val mse = 1.3818
Epoch 14: Train mse = 0.2135 | Val mse = 1.8920
Epoch 15: Train mse = 0.1937 | Val mse = 1.5696
Epoch 16: Train mse = 0.1708 | Val mse = 2.0351
Epoch 17: Train mse = 0.1426 | Val mse = 1.7540
Epoch 18: Train mse = 0.1313 | Val mse = 1.7492
Epoch 19: Train mse = 0.1160 | Val mse = 1.9255
Epoch 20: Train mse = 0.1157 | Val mse = 2.0014
Epoch 21: Train mse = 0.0972 | Val mse = 1.4587
Epoch 22: Train mse = 0.0818 | Val mse = 2.0170
Epoch 23: Train mse = 0.0719 | Val mse = 1.9197
Epoch 24: Train mse = 0.0689 | Val mse = 1.7806
Epoch 25: Train mse = 0.0528 | Val mse = 1.8706
Epoch 26: Train mse = 0.0421 | Val mse = 2.4418
Epoch 27: Train mse = 0.0371 | Val mse = 2.0068
Epoch 28: Train mse = 0.0361 | Val mse = 2.0571
Epoch 29: Train mse = 0.0269 | Val mse = 2.1904
Epoch 30: Train mse = 0.0254 | Val mse = 2.0178
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.28082293
1 day(s) RMSE                      : 6.67158377
1 day(s) R2                        : 0.00928429
1 day(s) Pearson r                 : 0.11014010
1 day(s) QLIKE                     : 0.44375689
full horizon MAE                   : 2.28082293
full horizon RMSE                  : 6.67158377
full horizon R2                    : 0.00928429
full horizon Pearson r             : 0.11014010
full horizon QLIKE                 : 0.44375689

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/MSFT/Simple_KAN_H1.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.00914, max=5.44325

=== MSFT | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.662699184664
  Min value:  -6.772192238786855
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.21838735918136
  Min value:  -2.8084790413953242
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.716447710866394
  Min value:  -4.319177175086081
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8943485628110612
  Min value:  -1.3176187186164257
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.94429083559171
  Min value:  -6.772192238873022
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9307399945851973
  Min value:  -2.33302541920076
Epoch 1: Train mse = 0.9502 | Val mse = 1.1441
Epoch 2: Train mse = 0.7008 | Val mse = 0.6693
Epoch 3: Train mse = 0.6086 | Val mse = 0.8942
Epoch 4: Train mse = 0.5342 | Val mse = 1.2981
Epoch 5: Train mse = 0.5076 | Val mse = 0.8888
Epoch 6: Train mse = 0.4534 | Val mse = 0.9899
Epoch 7: Train mse = 0.4244 | Val mse = 1.4743
Epoch 8: Train mse = 0.3910 | Val mse = 1.1263
Epoch 9: Train mse = 0.3627 | Val mse = 1.0273
Epoch 10: Train mse = 0.3383 | Val mse = 1.4879
Epoch 11: Train mse = 0.3164 | Val mse = 1.0815
Epoch 12: Train mse = 0.3047 | Val mse = 1.3079
Epoch 13: Train mse = 0.2929 | Val mse = 1.6047
Epoch 14: Train mse = 0.2809 | Val mse = 1.3470
Epoch 15: Train mse = 0.2754 | Val mse = 1.3393
Epoch 16: Train mse = 0.2714 | Val mse = 1.3723
Epoch 17: Train mse = 0.2702 | Val mse = 1.5451
Epoch 18: Train mse = 0.2637 | Val mse = 1.3617
Epoch 19: Train mse = 0.2584 | Val mse = 1.2530
Epoch 20: Train mse = 0.2532 | Val mse = 1.2399
Epoch 21: Train mse = 0.2478 | Val mse = 1.4583
Epoch 22: Train mse = 0.2383 | Val mse = 1.4773
Epoch 23: Train mse = 0.2249 | Val mse = 1.3162
Epoch 24: Train mse = 0.2157 | Val mse = 1.4645
Epoch 25: Train mse = 0.2153 | Val mse = 1.3412
Epoch 26: Train mse = 0.1968 | Val mse = 1.6725
Epoch 27: Train mse = 0.1862 | Val mse = 1.4568
Epoch 28: Train mse = 0.1759 | Val mse = 1.7216
Epoch 29: Train mse = 0.1715 | Val mse = 1.4031
Epoch 30: Train mse = 0.1665 | Val mse = 1.5695
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.39517245
1 day(s) RMSE                      : 6.67166341
1 day(s) R2                        : 0.00926064
1 day(s) Pearson r                 : 0.12178587
1 day(s) QLIKE                     : 0.46470677
3 day(s) MAE                       : 2.35933426
3 day(s) RMSE                      : 6.69124439
3 day(s) R2                        : 0.00336994
3 day(s) Pearson r                 : 0.10083813
3 day(s) QLIKE                     : 0.47463995
5 day(s) MAE                       : 2.34231772
5 day(s) RMSE                      : 6.70674921
5 day(s) R2                        : -0.00114183
5 day(s) Pearson r                 : 0.08674715
5 day(s) QLIKE                     : 0.47954383
full horizon MAE                   : 2.34231772
full horizon RMSE                  : 6.70674921
full horizon R2                    : -0.00114183
full horizon Pearson r             : 0.08674715
full horizon QLIKE                 : 0.47954383

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/MSFT/Simple_KAN_H5.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.880445, max=8.53311

=== MSFT | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.662699184664
  Min value:  -6.772192238786855
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2208777591762505
  Min value:  -2.8143642605456183
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.716447710866394
  Min value:  -4.319177175086081
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8952607849867555
  Min value:  -1.3217269183173053
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.94429083559171
  Min value:  -6.772192238873022
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9328875355250505
  Min value:  -2.3383439250775
Epoch 1: Train mse = 0.9649 | Val mse = 0.7150
Epoch 2: Train mse = 0.8071 | Val mse = 1.8943
Epoch 3: Train mse = 0.6883 | Val mse = 1.5450
Epoch 4: Train mse = 0.5950 | Val mse = 1.0970
Epoch 5: Train mse = 0.5474 | Val mse = 0.8891
Epoch 6: Train mse = 0.4975 | Val mse = 1.1555
Epoch 7: Train mse = 0.4541 | Val mse = 0.9412
Epoch 8: Train mse = 0.4257 | Val mse = 0.9446
Epoch 9: Train mse = 0.4030 | Val mse = 0.9475
Epoch 10: Train mse = 0.3829 | Val mse = 0.9147
Epoch 11: Train mse = 0.3704 | Val mse = 0.6103
Epoch 12: Train mse = 0.3619 | Val mse = 0.8391
Epoch 13: Train mse = 0.3527 | Val mse = 0.7281
Epoch 14: Train mse = 0.3455 | Val mse = 0.8450
Epoch 15: Train mse = 0.3429 | Val mse = 0.8047
Epoch 16: Train mse = 0.3372 | Val mse = 0.8783
Epoch 17: Train mse = 0.3295 | Val mse = 0.7924
Epoch 18: Train mse = 0.3153 | Val mse = 0.9629
Epoch 19: Train mse = 0.2980 | Val mse = 0.9318
Epoch 20: Train mse = 0.2962 | Val mse = 0.9906
Epoch 21: Train mse = 0.3006 | Val mse = 0.8321
Epoch 22: Train mse = 0.2851 | Val mse = 0.9577
Epoch 23: Train mse = 0.2732 | Val mse = 1.0359
Epoch 24: Train mse = 0.2626 | Val mse = 1.0197
Epoch 25: Train mse = 0.2585 | Val mse = 1.0168
Epoch 26: Train mse = 0.2512 | Val mse = 1.1794
Epoch 27: Train mse = 0.2458 | Val mse = 1.0681
Epoch 28: Train mse = 0.2412 | Val mse = 1.0768
Epoch 29: Train mse = 0.2356 | Val mse = 1.0413
Epoch 30: Train mse = 0.2304 | Val mse = 1.0863
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.18464942
1 day(s) RMSE                      : 6.76204032
1 day(s) R2                        : -0.01776304
1 day(s) Pearson r                 : 0.06196167
1 day(s) QLIKE                     : 0.48083198
3 day(s) MAE                       : 2.20958431
3 day(s) RMSE                      : 6.78536804
3 day(s) R2                        : -0.02486583
3 day(s) Pearson r                 : 0.05046515
3 day(s) QLIKE                     : 0.48511247
5 day(s) MAE                       : 2.22938742
5 day(s) RMSE                      : 6.80333859
5 day(s) R2                        : -0.03018600
5 day(s) Pearson r                 : 0.04213740
5 day(s) QLIKE                     : 0.48734728
10 day(s) MAE                      : 2.25188535
10 day(s) RMSE                     : 6.82283430
10 day(s) R2                       : -0.03602270
10 day(s) Pearson r                : 0.03085607
10 day(s) QLIKE                    : 0.48842507
full horizon MAE                   : 2.25188535
full horizon RMSE                  : 6.82283430
full horizon R2                    : -0.03602270
full horizon Pearson r             : 0.03085607
full horizon QLIKE                 : 0.48842507

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/MSFT/Simple_KAN_H10.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.662595, max=6.48753

=== MSFT | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.662699184664
  Min value:  -6.772192238786855
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.2181890995511795
  Min value:  -2.821740645483995
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.716447710866394
  Min value:  -4.319177175086081
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.891688839722294
  Min value:  -1.3281087285369817
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.94429083559171
  Min value:  -6.772192238873022
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930006982137461
  Min value:  -2.3454031279530607
Epoch 1: Train mse = 0.9781 | Val mse = 0.6091
Epoch 2: Train mse = 0.8436 | Val mse = 4.4170
Epoch 3: Train mse = 0.7147 | Val mse = 0.7821
Epoch 4: Train mse = 0.6239 | Val mse = 1.0126
Epoch 5: Train mse = 0.5683 | Val mse = 1.0559
Epoch 6: Train mse = 0.5242 | Val mse = 0.8372
Epoch 7: Train mse = 0.4846 | Val mse = 0.7542
Epoch 8: Train mse = 0.4596 | Val mse = 0.9093
Epoch 9: Train mse = 0.4356 | Val mse = 0.8865
Epoch 10: Train mse = 0.4234 | Val mse = 0.8513
Epoch 11: Train mse = 0.4051 | Val mse = 0.7554
Epoch 12: Train mse = 0.3872 | Val mse = 0.8865
Epoch 13: Train mse = 0.3731 | Val mse = 0.8635
Epoch 14: Train mse = 0.3687 | Val mse = 0.8921
Epoch 15: Train mse = 0.3573 | Val mse = 0.8523
Epoch 16: Train mse = 0.3518 | Val mse = 0.8725
Epoch 17: Train mse = 0.3441 | Val mse = 0.9754
Epoch 18: Train mse = 0.3397 | Val mse = 0.8796
Epoch 19: Train mse = 0.3353 | Val mse = 0.8349
Epoch 20: Train mse = 0.3315 | Val mse = 0.9204
Epoch 21: Train mse = 0.3261 | Val mse = 0.8646
Epoch 22: Train mse = 0.3193 | Val mse = 0.7979
Epoch 23: Train mse = 0.3116 | Val mse = 0.8948
Epoch 24: Train mse = 0.3044 | Val mse = 0.8677
Epoch 25: Train mse = 0.2979 | Val mse = 0.9164
Epoch 26: Train mse = 0.2904 | Val mse = 0.9578
Epoch 27: Train mse = 0.2848 | Val mse = 0.8726
Epoch 28: Train mse = 0.2796 | Val mse = 0.9947
Epoch 29: Train mse = 0.2743 | Val mse = 0.8908
Epoch 30: Train mse = 0.2688 | Val mse = 0.9186
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.03240446
1 day(s) RMSE                      : 6.84342837
1 day(s) R2                        : -0.04241011
1 day(s) Pearson r                 : -0.03372010
1 day(s) QLIKE                     : 0.51434567
3 day(s) MAE                       : 2.03964118
3 day(s) RMSE                      : 6.85342951
3 day(s) R2                        : -0.04552903
3 day(s) Pearson r                 : -0.03462839
3 day(s) QLIKE                     : 0.51522089
5 day(s) MAE                       : 2.04262208
5 day(s) RMSE                      : 6.84705644
5 day(s) R2                        : -0.04346837
5 day(s) Pearson r                 : -0.03516538
5 day(s) QLIKE                     : 0.51127029
10 day(s) MAE                      : 2.05778336
10 day(s) RMSE                     : 6.84888201
10 day(s) R2                       : -0.04394830
10 day(s) Pearson r                : -0.04058281
10 day(s) QLIKE                    : 0.51113703
20 day(s) MAE                      : 2.13008206
20 day(s) RMSE                     : 7.05893875
20 day(s) R2                       : -0.04496904
20 day(s) Pearson r                : -0.04710252
20 day(s) QLIKE                    : 0.52423121
full horizon MAE                   : 2.13008206
full horizon RMSE                  : 7.05893875
full horizon R2                    : -0.04496904
full horizon Pearson r             : -0.04710252
full horizon QLIKE                 : 0.52423121

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/MSFT/Simple_KAN_H20.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=0.985229, max=3.18312

=== GE | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -4.100074765876385
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.030029259880504
  Min value:  -2.652656432671519
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.0235417533877262
  Min value:  -1.87462522032935
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0718644033108644
  Min value:  -2.366307284351862
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.323746204771034
  Min value:  -1.87462522032935
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.646703896552383
  Min value:  -2.148420825574933
Epoch 1: Train mse = 0.9410 | Val mse = 0.6786
Epoch 2: Train mse = 0.7224 | Val mse = 0.5602
Epoch 3: Train mse = 0.6152 | Val mse = 0.7667
Epoch 4: Train mse = 0.5263 | Val mse = 0.6078
Epoch 5: Train mse = 0.4804 | Val mse = 0.4625
Epoch 6: Train mse = 0.4456 | Val mse = 0.3906
Epoch 7: Train mse = 0.4190 | Val mse = 0.5069
Epoch 8: Train mse = 0.4022 | Val mse = 0.4571
Epoch 9: Train mse = 0.3708 | Val mse = 0.4933
Epoch 10: Train mse = 0.3642 | Val mse = 0.4336
Epoch 11: Train mse = 0.3474 | Val mse = 0.4905
Epoch 12: Train mse = 0.3493 | Val mse = 0.5008
Epoch 13: Train mse = 0.3180 | Val mse = 0.4648
Epoch 14: Train mse = 0.3008 | Val mse = 0.4591
Epoch 15: Train mse = 0.2878 | Val mse = 0.5308
Epoch 16: Train mse = 0.2766 | Val mse = 0.4545
Epoch 17: Train mse = 0.2492 | Val mse = 0.5336
Epoch 18: Train mse = 0.2345 | Val mse = 0.4284
Epoch 19: Train mse = 0.2272 | Val mse = 0.4690
Epoch 20: Train mse = 0.2090 | Val mse = 0.4289
Epoch 21: Train mse = 0.2015 | Val mse = 0.4400
Epoch 22: Train mse = 0.1953 | Val mse = 0.4167
Epoch 23: Train mse = 0.1762 | Val mse = 0.4283
Epoch 24: Train mse = 0.2122 | Val mse = 0.5048
Epoch 25: Train mse = 0.1591 | Val mse = 0.4499
Epoch 26: Train mse = 0.1594 | Val mse = 0.5424
Epoch 27: Train mse = 0.1497 | Val mse = 0.4571
Epoch 28: Train mse = 0.1242 | Val mse = 0.5251
Epoch 29: Train mse = 0.1358 | Val mse = 0.4462
Epoch 30: Train mse = 0.1482 | Val mse = 0.5236
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.04590246
1 day(s) RMSE                      : 27.43741760
1 day(s) R2                        : -0.00081928
1 day(s) Pearson r                 : 0.13423973
1 day(s) QLIKE                     : 0.58339518
full horizon MAE                   : 4.04590246
full horizon RMSE                  : 27.43741760
full horizon R2                    : -0.00081928
full horizon Pearson r             : 0.13423973
full horizon QLIKE                 : 0.58339518

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GE/Simple_KAN_H1.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.85714, max=9.97556

=== GE | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -4.100074765876385
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.05628901565786
  Min value:  -2.663641411446664
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.0235417533877262
  Min value:  -1.87462522032935
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0759413314511908
  Min value:  -2.3763793738908188
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.323746204771034
  Min value:  -1.87462522032935
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.658989486293715
  Min value:  -2.1577982868808543
Epoch 1: Train mse = 0.9629 | Val mse = 0.5906
Epoch 2: Train mse = 0.7561 | Val mse = 0.3987
Epoch 3: Train mse = 0.6301 | Val mse = 0.9765
Epoch 4: Train mse = 0.5596 | Val mse = 0.7188
Epoch 5: Train mse = 0.5111 | Val mse = 0.5055
Epoch 6: Train mse = 0.4801 | Val mse = 0.3715
Epoch 7: Train mse = 0.4401 | Val mse = 0.3961
Epoch 8: Train mse = 0.4199 | Val mse = 0.4076
Epoch 9: Train mse = 0.4025 | Val mse = 0.4717
Epoch 10: Train mse = 0.3889 | Val mse = 0.4160
Epoch 11: Train mse = 0.3753 | Val mse = 0.5222
Epoch 12: Train mse = 0.3641 | Val mse = 0.4751
Epoch 13: Train mse = 0.3566 | Val mse = 0.5707
Epoch 14: Train mse = 0.3474 | Val mse = 0.4556
Epoch 15: Train mse = 0.3441 | Val mse = 0.5275
Epoch 16: Train mse = 0.3360 | Val mse = 0.4332
Epoch 17: Train mse = 0.3272 | Val mse = 0.4181
Epoch 18: Train mse = 0.3227 | Val mse = 0.4339
Epoch 19: Train mse = 0.3175 | Val mse = 0.4431
Epoch 20: Train mse = 0.3101 | Val mse = 0.4240
Epoch 21: Train mse = 0.3103 | Val mse = 0.4123
Epoch 22: Train mse = 0.3007 | Val mse = 0.4144
Epoch 23: Train mse = 0.2946 | Val mse = 0.4292
Epoch 24: Train mse = 0.2861 | Val mse = 0.4319
Epoch 25: Train mse = 0.2779 | Val mse = 0.4351
Epoch 26: Train mse = 0.2687 | Val mse = 0.4280
Epoch 27: Train mse = 0.2608 | Val mse = 0.4454
Epoch 28: Train mse = 0.2555 | Val mse = 0.4499
Epoch 29: Train mse = 0.2492 | Val mse = 0.4546
Epoch 30: Train mse = 0.2417 | Val mse = 0.4499
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.24708471
1 day(s) RMSE                      : 27.60476089
1 day(s) R2                        : -0.01306469
1 day(s) Pearson r                 : 0.14212663
1 day(s) QLIKE                     : 0.57748728
3 day(s) MAE                       : 4.23087124
3 day(s) RMSE                      : 27.59444237
3 day(s) R2                        : -0.01236573
3 day(s) Pearson r                 : 0.14604028
3 day(s) QLIKE                     : 0.57622837
5 day(s) MAE                       : 4.23786027
5 day(s) RMSE                      : 27.59368417
5 day(s) R2                        : -0.01236807
5 day(s) Pearson r                 : 0.15063285
5 day(s) QLIKE                     : 0.57304989
full horizon MAE                   : 4.23786027
full horizon RMSE                  : 27.59368417
full horizon R2                    : -0.01236807
full horizon Pearson r             : 0.15063285
full horizon QLIKE                 : 0.57304989

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GE/Simple_KAN_H5.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.62657, max=4.22927

=== GE | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -4.100074765876385
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.080595466809852
  Min value:  -2.674432668646892
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.0235417533877262
  Min value:  -1.87462522032935
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.079343715915996
  Min value:  -2.3863103666656302
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.323746204771034
  Min value:  -1.87462522032935
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.670127332275317
  Min value:  -2.167074694264489
Epoch 1: Train mse = 0.9680 | Val mse = 0.5740
Epoch 2: Train mse = 0.7850 | Val mse = 0.5834
Epoch 3: Train mse = 0.6456 | Val mse = 1.2520
Epoch 4: Train mse = 0.5724 | Val mse = 0.9381
Epoch 5: Train mse = 0.5357 | Val mse = 0.6365
Epoch 6: Train mse = 0.4889 | Val mse = 0.6580
Epoch 7: Train mse = 0.4622 | Val mse = 0.5036
Epoch 8: Train mse = 0.4453 | Val mse = 0.6822
Epoch 9: Train mse = 0.4328 | Val mse = 0.8089
Epoch 10: Train mse = 0.4248 | Val mse = 0.7605
Epoch 11: Train mse = 0.4140 | Val mse = 0.7149
Epoch 12: Train mse = 0.4111 | Val mse = 0.7975
Epoch 13: Train mse = 0.4023 | Val mse = 0.7534
Epoch 14: Train mse = 0.3938 | Val mse = 0.8222
Epoch 15: Train mse = 0.3894 | Val mse = 0.8373
Epoch 16: Train mse = 0.3851 | Val mse = 0.8241
Epoch 17: Train mse = 0.3782 | Val mse = 0.8749
Epoch 18: Train mse = 0.3759 | Val mse = 0.8395
Epoch 19: Train mse = 0.3703 | Val mse = 0.9839
Epoch 20: Train mse = 0.3576 | Val mse = 0.8961
Epoch 21: Train mse = 0.3457 | Val mse = 1.0443
Epoch 22: Train mse = 0.3497 | Val mse = 0.9105
Epoch 23: Train mse = 0.3431 | Val mse = 1.1360
Epoch 24: Train mse = 0.3280 | Val mse = 1.0146
Epoch 25: Train mse = 0.3187 | Val mse = 1.1245
Epoch 26: Train mse = 0.3126 | Val mse = 1.0759
Epoch 27: Train mse = 0.3060 | Val mse = 1.1044
Epoch 28: Train mse = 0.3021 | Val mse = 1.0541
Epoch 29: Train mse = 0.2958 | Val mse = 1.0889
Epoch 30: Train mse = 0.2938 | Val mse = 1.0899
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.20590176
1 day(s) RMSE                      : 27.47946687
1 day(s) R2                        : -0.00388925
1 day(s) Pearson r                 : 0.12597910
1 day(s) QLIKE                     : 0.61697216
3 day(s) MAE                       : 4.22101293
3 day(s) RMSE                      : 27.47473113
3 day(s) R2                        : -0.00360101
3 day(s) Pearson r                 : 0.12351563
3 day(s) QLIKE                     : 0.61664799
5 day(s) MAE                       : 4.23225229
5 day(s) RMSE                      : 27.47225148
5 day(s) R2                        : -0.00347734
5 day(s) Pearson r                 : 0.12285672
5 day(s) QLIKE                     : 0.61666400
10 day(s) MAE                      : 4.23866564
10 day(s) RMSE                     : 27.47212966
10 day(s) R2                       : -0.00332118
10 day(s) Pearson r                : 0.13007834
10 day(s) QLIKE                    : 0.60829894
full horizon MAE                   : 4.23866564
full horizon RMSE                  : 27.47212966
full horizon R2                    : -0.00332118
full horizon Pearson r             : 0.13007834
full horizon QLIKE                 : 0.60829894

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GE/Simple_KAN_H10.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=2.64359, max=6.20741

=== GE | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238581
  Min value:  -4.100074765876385
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.120224031597557
  Min value:  -2.692943196140936
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.0235417533877262
  Min value:  -1.87462522032935
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.084344850046585
  Min value:  -2.4033958725510236
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.323746204771034
  Min value:  -1.87462522032935
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.6879422007515705
  Min value:  -2.183075884328406
Epoch 1: Train mse = 0.9705 | Val mse = 0.6144
Epoch 2: Train mse = 0.7905 | Val mse = 0.9188
Epoch 3: Train mse = 0.6566 | Val mse = 1.4962
Epoch 4: Train mse = 0.5961 | Val mse = 0.9559
Epoch 5: Train mse = 0.5474 | Val mse = 0.5920
Epoch 6: Train mse = 0.5096 | Val mse = 0.5143
Epoch 7: Train mse = 0.4898 | Val mse = 0.4915
Epoch 8: Train mse = 0.4767 | Val mse = 0.6006
Epoch 9: Train mse = 0.4672 | Val mse = 0.6780
Epoch 10: Train mse = 0.4622 | Val mse = 0.6727
Epoch 11: Train mse = 0.4523 | Val mse = 0.8075
Epoch 12: Train mse = 0.4423 | Val mse = 0.8084
Epoch 13: Train mse = 0.4291 | Val mse = 0.8396
Epoch 14: Train mse = 0.4199 | Val mse = 0.8741
Epoch 15: Train mse = 0.4085 | Val mse = 0.8999
Epoch 16: Train mse = 0.4008 | Val mse = 0.9463
Epoch 17: Train mse = 0.3913 | Val mse = 0.8384
Epoch 18: Train mse = 0.3895 | Val mse = 0.9680
Epoch 19: Train mse = 0.3846 | Val mse = 0.9535
Epoch 20: Train mse = 0.3802 | Val mse = 0.8934
Epoch 21: Train mse = 0.3771 | Val mse = 1.0385
Epoch 22: Train mse = 0.3705 | Val mse = 0.9482
Epoch 23: Train mse = 0.3668 | Val mse = 0.9912
Epoch 24: Train mse = 0.3606 | Val mse = 1.0019
Epoch 25: Train mse = 0.3554 | Val mse = 1.0029
Epoch 26: Train mse = 0.3460 | Val mse = 1.0395
Epoch 27: Train mse = 0.3381 | Val mse = 0.9882
Epoch 28: Train mse = 0.3346 | Val mse = 1.1618
Epoch 29: Train mse = 0.3298 | Val mse = 1.2447
Epoch 30: Train mse = 0.3291 | Val mse = 1.0515
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.21736126
1 day(s) RMSE                      : 27.53810045
1 day(s) R2                        : -0.00817786
1 day(s) Pearson r                 : 0.09776800
1 day(s) QLIKE                     : 0.62401058
3 day(s) MAE                       : 4.20902362
3 day(s) RMSE                      : 27.53323054
3 day(s) R2                        : -0.00787931
3 day(s) Pearson r                 : 0.10069852
3 day(s) QLIKE                     : 0.62283444
5 day(s) MAE                       : 4.20775943
5 day(s) RMSE                      : 27.53910717
5 day(s) R2                        : -0.00836735
5 day(s) Pearson r                 : 0.10054168
5 day(s) QLIKE                     : 0.61966181
10 day(s) MAE                      : 4.22512756
10 day(s) RMSE                     : 27.54050710
10 day(s) R2                       : -0.00832188
10 day(s) Pearson r                : 0.10440397
10 day(s) QLIKE                    : 0.61560962
20 day(s) MAE                      : 4.25221882
20 day(s) RMSE                     : 27.54054181
20 day(s) R2                       : -0.00827107
20 day(s) Pearson r                : 0.10410960
20 day(s) QLIKE                    : 0.61601795
full horizon MAE                   : 4.25221882
full horizon RMSE                  : 27.54054181
full horizon R2                    : -0.00827107
full horizon Pearson r             : 0.10410960
full horizon QLIKE                 : 0.61601795

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GE/Simple_KAN_H20.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.94896, max=4.96888

=== BAC | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -4.428701459937723
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.10774278654742
  Min value:  -3.152671373228803
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -3.0829742668180926
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2542888447212466
  Min value:  -1.29240155253667
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -2.9204727182396413
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.207108669727663
  Min value:  -1.9461535420618146
Epoch 1: Train mse = 0.9185 | Val mse = 0.5658
Epoch 2: Train mse = 0.6551 | Val mse = 0.5616
Epoch 3: Train mse = 0.5806 | Val mse = 0.5121
Epoch 4: Train mse = 0.4857 | Val mse = 0.5967
Epoch 5: Train mse = 0.4428 | Val mse = 0.5164
Epoch 6: Train mse = 0.3837 | Val mse = 0.5586
Epoch 7: Train mse = 0.3526 | Val mse = 0.5082
Epoch 8: Train mse = 0.3162 | Val mse = 0.5049
Epoch 9: Train mse = 0.2933 | Val mse = 0.5536
Epoch 10: Train mse = 0.2582 | Val mse = 0.5059
Epoch 11: Train mse = 0.2264 | Val mse = 0.5179
Epoch 12: Train mse = 0.2062 | Val mse = 0.5261
Epoch 13: Train mse = 0.1746 | Val mse = 0.5370
Epoch 14: Train mse = 0.1588 | Val mse = 0.5450
Epoch 15: Train mse = 0.1413 | Val mse = 0.6004
Epoch 16: Train mse = 0.1311 | Val mse = 0.5800
Epoch 17: Train mse = 0.1148 | Val mse = 0.5654
Epoch 18: Train mse = 0.0964 | Val mse = 0.5505
Epoch 19: Train mse = 0.0782 | Val mse = 0.5939
Epoch 20: Train mse = 0.0673 | Val mse = 0.5888
Epoch 21: Train mse = 0.0611 | Val mse = 0.6118
Epoch 22: Train mse = 0.0508 | Val mse = 0.6469
Epoch 23: Train mse = 0.0425 | Val mse = 0.6224
Epoch 24: Train mse = 0.0383 | Val mse = 0.5722
Epoch 25: Train mse = 0.0343 | Val mse = 0.5931
Epoch 26: Train mse = 0.0281 | Val mse = 0.5905
Epoch 27: Train mse = 0.0232 | Val mse = 0.6016
Epoch 28: Train mse = 0.0219 | Val mse = 0.5788
Epoch 29: Train mse = 0.0198 | Val mse = 0.5937
Epoch 30: Train mse = 0.0182 | Val mse = 0.6169
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.27445327
1 day(s) RMSE                      : 7.56234546
1 day(s) R2                        : 0.05094868
1 day(s) Pearson r                 : 0.25017738
1 day(s) QLIKE                     : 0.45358636
full horizon MAE                   : 2.27445327
full horizon RMSE                  : 7.56234546
full horizon R2                    : 0.05094868
full horizon Pearson r             : 0.25017738
full horizon QLIKE                 : 0.45358636

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BAC/Simple_KAN_H1.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.32094, max=20.2467

=== BAC | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -4.428701459937723
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.106232115580275
  Min value:  -3.150569092336922
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -3.0829742668180926
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.253588840112375
  Min value:  -1.2911129191419919
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -2.9204727182396413
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.205991919409497
  Min value:  -1.9445789696790647
Epoch 1: Train mse = 0.9293 | Val mse = 0.5409
Epoch 2: Train mse = 0.7837 | Val mse = 0.5272
Epoch 3: Train mse = 0.7330 | Val mse = 0.5312
Epoch 4: Train mse = 0.6196 | Val mse = 0.5598
Epoch 5: Train mse = 0.5311 | Val mse = 0.5623
Epoch 6: Train mse = 0.4512 | Val mse = 0.5919
Epoch 7: Train mse = 0.4150 | Val mse = 0.5974
Epoch 8: Train mse = 0.3812 | Val mse = 0.6078
Epoch 9: Train mse = 0.3529 | Val mse = 0.6767
Epoch 10: Train mse = 0.3329 | Val mse = 0.6162
Epoch 11: Train mse = 0.3124 | Val mse = 0.6635
Epoch 12: Train mse = 0.3009 | Val mse = 0.6474
Epoch 13: Train mse = 0.2931 | Val mse = 0.6731
Epoch 14: Train mse = 0.2897 | Val mse = 0.6829
Epoch 15: Train mse = 0.2823 | Val mse = 0.7044
Epoch 16: Train mse = 0.2792 | Val mse = 0.7185
Epoch 17: Train mse = 0.2735 | Val mse = 0.6711
Epoch 18: Train mse = 0.2688 | Val mse = 0.7078
Epoch 19: Train mse = 0.2598 | Val mse = 0.6834
Epoch 20: Train mse = 0.2504 | Val mse = 0.7043
Epoch 21: Train mse = 0.2377 | Val mse = 0.7014
Epoch 22: Train mse = 0.2310 | Val mse = 0.7058
Epoch 23: Train mse = 0.2167 | Val mse = 0.6943
Epoch 24: Train mse = 0.2063 | Val mse = 0.6833
Epoch 25: Train mse = 0.2009 | Val mse = 0.7173
Epoch 26: Train mse = 0.1917 | Val mse = 0.6873
Epoch 27: Train mse = 0.1858 | Val mse = 0.7285
Epoch 28: Train mse = 0.1808 | Val mse = 0.6665
Epoch 29: Train mse = 0.1744 | Val mse = 0.7173
Epoch 30: Train mse = 0.1694 | Val mse = 0.6883
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.60878762
1 day(s) RMSE                      : 7.81880219
1 day(s) R2                        : -0.01451185
1 day(s) Pearson r                 : -0.03943716
1 day(s) QLIKE                     : 0.45910379
3 day(s) MAE                       : 2.61973922
3 day(s) RMSE                      : 7.83520593
3 day(s) R2                        : -0.01812267
3 day(s) Pearson r                 : -0.06794995
3 day(s) QLIKE                     : 0.46911633
5 day(s) MAE                       : 2.58892064
5 day(s) RMSE                      : 7.84042421
5 day(s) R2                        : -0.01916429
5 day(s) Pearson r                 : -0.08762327
5 day(s) QLIKE                     : 0.47056519
full horizon MAE                   : 2.58892064
full horizon RMSE                  : 7.84042421
full horizon R2                    : -0.01916429
full horizon Pearson r             : -0.08762327
full horizon QLIKE                 : 0.47056519

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BAC/Simple_KAN_H5.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.40163, max=4.49188

=== BAC | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -4.428701459937723
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.1031963312484585
  Min value:  -3.149203897636293
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -3.0829742668180926
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.251540537979127
  Min value:  -1.2907388380002232
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -2.9204727182396413
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203435974440135
  Min value:  -1.9438565829024124
Epoch 1: Train mse = 0.9502 | Val mse = 0.5285
Epoch 2: Train mse = 0.8322 | Val mse = 0.5014
Epoch 3: Train mse = 0.7407 | Val mse = 0.5385
Epoch 4: Train mse = 0.6383 | Val mse = 0.5257
Epoch 5: Train mse = 0.5570 | Val mse = 0.5362
Epoch 6: Train mse = 0.4914 | Val mse = 0.6212
Epoch 7: Train mse = 0.4489 | Val mse = 0.5836
Epoch 8: Train mse = 0.4176 | Val mse = 0.6386
Epoch 9: Train mse = 0.4026 | Val mse = 0.6267
Epoch 10: Train mse = 0.3860 | Val mse = 0.6575
Epoch 11: Train mse = 0.3732 | Val mse = 0.6458
Epoch 12: Train mse = 0.3651 | Val mse = 0.6440
Epoch 13: Train mse = 0.3559 | Val mse = 0.6788
Epoch 14: Train mse = 0.3543 | Val mse = 0.6340
Epoch 15: Train mse = 0.3478 | Val mse = 0.6412
Epoch 16: Train mse = 0.3378 | Val mse = 0.6284
Epoch 17: Train mse = 0.3234 | Val mse = 0.6312
Epoch 18: Train mse = 0.3067 | Val mse = 0.6310
Epoch 19: Train mse = 0.2929 | Val mse = 0.6419
Epoch 20: Train mse = 0.2898 | Val mse = 0.6535
Epoch 21: Train mse = 0.2798 | Val mse = 0.6718
Epoch 22: Train mse = 0.2750 | Val mse = 0.6334
Epoch 23: Train mse = 0.2714 | Val mse = 0.6637
Epoch 24: Train mse = 0.2662 | Val mse = 0.6437
Epoch 25: Train mse = 0.2624 | Val mse = 0.6471
Epoch 26: Train mse = 0.2587 | Val mse = 0.6489
Epoch 27: Train mse = 0.2546 | Val mse = 0.6679
Epoch 28: Train mse = 0.2483 | Val mse = 0.6549
Epoch 29: Train mse = 0.2403 | Val mse = 0.6520
Epoch 30: Train mse = 0.2338 | Val mse = 0.6808
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.31748362
1 day(s) RMSE                      : 7.83325846
1 day(s) R2                        : -0.01826680
1 day(s) Pearson r                 : -0.03387885
1 day(s) QLIKE                     : 0.44796140
3 day(s) MAE                       : 2.34117782
3 day(s) RMSE                      : 7.83949416
3 day(s) R2                        : -0.01923742
3 day(s) Pearson r                 : -0.07027134
3 day(s) QLIKE                     : 0.45258718
5 day(s) MAE                       : 2.35382966
5 day(s) RMSE                      : 7.84426726
5 day(s) R2                        : -0.02016364
5 day(s) Pearson r                 : -0.09473055
5 day(s) QLIKE                     : 0.45552393
10 day(s) MAE                      : 2.36923141
10 day(s) RMSE                     : 7.85983761
10 day(s) R2                       : -0.02412039
10 day(s) Pearson r                : -0.12158014
10 day(s) QLIKE                    : 0.46455460
full horizon MAE                   : 2.36923141
full horizon RMSE                  : 7.85983761
full horizon R2                    : -0.02412039
full horizon Pearson r             : -0.12158014
full horizon QLIKE                 : 0.46455460

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BAC/Simple_KAN_H10.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.1105, max=4.2876

=== BAC | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179623
  Min value:  -4.428701459937723
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103268362353564
  Min value:  -3.1487890186062457
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166819
  Min value:  -3.0829742668180926
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.251689496563368
  Min value:  -1.290401169341898
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787336
  Min value:  -2.9204727182396413
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203545386302683
  Min value:  -1.9434917803124587
Epoch 1: Train mse = 0.9693 | Val mse = 0.5318
Epoch 2: Train mse = 0.8190 | Val mse = 0.5002
Epoch 3: Train mse = 0.7102 | Val mse = 0.5215
Epoch 4: Train mse = 0.6144 | Val mse = 0.4884
Epoch 5: Train mse = 0.5539 | Val mse = 0.5026
Epoch 6: Train mse = 0.5059 | Val mse = 0.5230
Epoch 7: Train mse = 0.4766 | Val mse = 0.5340
Epoch 8: Train mse = 0.4548 | Val mse = 0.6238
Epoch 9: Train mse = 0.4327 | Val mse = 0.5285
Epoch 10: Train mse = 0.4170 | Val mse = 0.5310
Epoch 11: Train mse = 0.4030 | Val mse = 0.5327
Epoch 12: Train mse = 0.3917 | Val mse = 0.5153
Epoch 13: Train mse = 0.3798 | Val mse = 0.4978
Epoch 14: Train mse = 0.3720 | Val mse = 0.5167
Epoch 15: Train mse = 0.3664 | Val mse = 0.5103
Epoch 16: Train mse = 0.3597 | Val mse = 0.5268
Epoch 17: Train mse = 0.3536 | Val mse = 0.5250
Epoch 18: Train mse = 0.3472 | Val mse = 0.5241
Epoch 19: Train mse = 0.3422 | Val mse = 0.5223
Epoch 20: Train mse = 0.3380 | Val mse = 0.5271
Epoch 21: Train mse = 0.3327 | Val mse = 0.5215
Epoch 22: Train mse = 0.3270 | Val mse = 0.5322
Epoch 23: Train mse = 0.3200 | Val mse = 0.5216
Epoch 24: Train mse = 0.3093 | Val mse = 0.5325
Epoch 25: Train mse = 0.3005 | Val mse = 0.5479
Epoch 26: Train mse = 0.2919 | Val mse = 0.5603
Epoch 27: Train mse = 0.2855 | Val mse = 0.5522
Epoch 28: Train mse = 0.2773 | Val mse = 0.5644
Epoch 29: Train mse = 0.2703 | Val mse = 0.5530
Epoch 30: Train mse = 0.2629 | Val mse = 0.5649
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.51626768
1 day(s) RMSE                      : 7.95222584
1 day(s) R2                        : -0.04943147
1 day(s) Pearson r                 : -0.05234440
1 day(s) QLIKE                     : 0.51950806
3 day(s) MAE                       : 2.50709765
3 day(s) RMSE                      : 7.96761254
3 day(s) R2                        : -0.05282379
3 day(s) Pearson r                 : -0.07138328
3 day(s) QLIKE                     : 0.53274623
5 day(s) MAE                       : 2.50733997
5 day(s) RMSE                      : 7.97805936
5 day(s) R2                        : -0.05526031
5 day(s) Pearson r                 : -0.08341426
5 day(s) QLIKE                     : 0.53895782
10 day(s) MAE                      : 2.56712265
10 day(s) RMSE                     : 8.01364516
10 day(s) R2                       : -0.06459417
10 day(s) Pearson r                : -0.09603595
10 day(s) QLIKE                    : 0.55911839
20 day(s) MAE                      : 2.61654886
20 day(s) RMSE                     : 8.02643004
20 day(s) R2                       : -0.06834558
20 day(s) Pearson r                : -0.09043413
20 day(s) QLIKE                    : 0.56487918
full horizon MAE                   : 2.61654886
full horizon RMSE                  : 8.02643004
full horizon R2                    : -0.06834558
full horizon Pearson r             : -0.09043413
full horizon QLIKE                 : 0.56487918

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BAC/Simple_KAN_H20.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.5241, max=6.89023

=== C | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -5.781033963264048
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.116803264609633
  Min value:  -2.5790852911299234
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.316307789702823
  Min value:  -3.321376796599313
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6474166417153135
  Min value:  -1.663048077620065
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.453119269597156
  Min value:  -3.4619875412520558
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.113246907180982
  Min value:  -2.058904827000899
Epoch 1: Train mse = 0.9151 | Val mse = 1.1496
Epoch 2: Train mse = 0.7381 | Val mse = 1.0338
Epoch 3: Train mse = 0.6327 | Val mse = 1.3866
Epoch 4: Train mse = 0.5482 | Val mse = 0.4775
Epoch 5: Train mse = 0.4725 | Val mse = 1.3352
Epoch 6: Train mse = 0.4143 | Val mse = 1.3612
Epoch 7: Train mse = 0.3874 | Val mse = 0.8092
Epoch 8: Train mse = 0.3482 | Val mse = 1.5984
Epoch 9: Train mse = 0.3087 | Val mse = 1.0738
Epoch 10: Train mse = 0.2843 | Val mse = 1.1910
Epoch 11: Train mse = 0.2551 | Val mse = 1.2608
Epoch 12: Train mse = 0.2337 | Val mse = 1.1709
Epoch 13: Train mse = 0.2130 | Val mse = 0.9140
Epoch 14: Train mse = 0.1953 | Val mse = 1.2065
Epoch 15: Train mse = 0.1734 | Val mse = 0.8229
Epoch 16: Train mse = 0.1647 | Val mse = 1.2021
Epoch 17: Train mse = 0.1399 | Val mse = 1.2290
Epoch 18: Train mse = 0.1363 | Val mse = 1.5384
Epoch 19: Train mse = 0.1240 | Val mse = 0.8120
Epoch 20: Train mse = 0.1183 | Val mse = 1.3908
Epoch 21: Train mse = 0.1120 | Val mse = 1.1610
Epoch 22: Train mse = 0.1023 | Val mse = 1.0829
Epoch 23: Train mse = 0.0892 | Val mse = 0.8880
Epoch 24: Train mse = 0.0759 | Val mse = 0.9996
Epoch 25: Train mse = 0.0751 | Val mse = 0.7573
Epoch 26: Train mse = 0.0729 | Val mse = 0.9482
Epoch 27: Train mse = 0.0621 | Val mse = 0.8507
Epoch 28: Train mse = 0.0569 | Val mse = 0.9297
Epoch 29: Train mse = 0.0470 | Val mse = 0.8220
Epoch 30: Train mse = 0.0524 | Val mse = 0.8560
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.83079705
1 day(s) RMSE                      : 9.35238622
1 day(s) R2                        : 0.01828693
1 day(s) Pearson r                 : 0.26113963
1 day(s) QLIKE                     : 0.37579545
full horizon MAE                   : 2.83079705
full horizon RMSE                  : 9.35238622
full horizon R2                    : 0.01828693
full horizon Pearson r             : 0.26113963
full horizon QLIKE                 : 0.37579545

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/C/Simple_KAN_H1.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.90879, max=9.36335

=== C | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -5.781033963264048
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.113808938949674
  Min value:  -2.5779301635708127
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.316307789702823
  Min value:  -3.321376796599313
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6457537548096046
  Min value:  -1.6623868571045473
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.453119269597156
  Min value:  -3.4619875412520558
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.110793676954993
  Min value:  -2.05803016926483
Epoch 1: Train mse = 0.9275 | Val mse = 0.5884
Epoch 2: Train mse = 0.7303 | Val mse = 0.9053
Epoch 3: Train mse = 0.6666 | Val mse = 1.4772
Epoch 4: Train mse = 0.5606 | Val mse = 0.7820
Epoch 5: Train mse = 0.5037 | Val mse = 0.9060
Epoch 6: Train mse = 0.4530 | Val mse = 1.6608
Epoch 7: Train mse = 0.4056 | Val mse = 0.9117
Epoch 8: Train mse = 0.3724 | Val mse = 1.9222
Epoch 9: Train mse = 0.3433 | Val mse = 1.3655
Epoch 10: Train mse = 0.3233 | Val mse = 1.6783
Epoch 11: Train mse = 0.3052 | Val mse = 2.2643
Epoch 12: Train mse = 0.2922 | Val mse = 1.0609
Epoch 13: Train mse = 0.2840 | Val mse = 1.6263
Epoch 14: Train mse = 0.2745 | Val mse = 1.1839
Epoch 15: Train mse = 0.2683 | Val mse = 1.8262
Epoch 16: Train mse = 0.2631 | Val mse = 1.2358
Epoch 17: Train mse = 0.2584 | Val mse = 1.4218
Epoch 18: Train mse = 0.2533 | Val mse = 1.2947
Epoch 19: Train mse = 0.2506 | Val mse = 1.5033
Epoch 20: Train mse = 0.2466 | Val mse = 1.3009
Epoch 21: Train mse = 0.2443 | Val mse = 1.4638
Epoch 22: Train mse = 0.2403 | Val mse = 1.3434
Epoch 23: Train mse = 0.2346 | Val mse = 1.3118
Epoch 24: Train mse = 0.2253 | Val mse = 1.5624
Epoch 25: Train mse = 0.2195 | Val mse = 1.4337
Epoch 26: Train mse = 0.2105 | Val mse = 1.3632
Epoch 27: Train mse = 0.2057 | Val mse = 1.5391
Epoch 28: Train mse = 0.1960 | Val mse = 1.4077
Epoch 29: Train mse = 0.1874 | Val mse = 1.5848
Epoch 30: Train mse = 0.1799 | Val mse = 1.5070
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.78352390
1 day(s) RMSE                      : 9.62483433
1 day(s) R2                        : -0.03974354
1 day(s) Pearson r                 : 0.03226020
1 day(s) QLIKE                     : 0.42717548
3 day(s) MAE                       : 2.84387901
3 day(s) RMSE                      : 9.71131439
3 day(s) R2                        : -0.05335032
3 day(s) Pearson r                 : 0.00300381
3 day(s) QLIKE                     : 0.43839667
5 day(s) MAE                       : 2.89243953
5 day(s) RMSE                      : 9.75856211
5 day(s) R2                        : -0.06023890
5 day(s) Pearson r                 : -0.02124450
5 day(s) QLIKE                     : 0.44838879
full horizon MAE                   : 2.89243953
full horizon RMSE                  : 9.75856211
full horizon R2                    : -0.06023890
full horizon Pearson r             : -0.02124450
full horizon QLIKE                 : 0.44838879

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/C/Simple_KAN_H5.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.54577, max=3.4438

=== C | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -5.781033963264048
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.111052537752696
  Min value:  -2.577782485226596
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.316307789702823
  Min value:  -3.321376796599313
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6439291882044227
  Min value:  -1.662584849687275
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.453119269597156
  Min value:  -3.4619875412520558
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.1084159724432565
  Min value:  -2.0580787834517045
Epoch 1: Train mse = 0.9498 | Val mse = 0.4963
Epoch 2: Train mse = 0.7451 | Val mse = 0.7568
Epoch 3: Train mse = 0.6458 | Val mse = 1.3818
Epoch 4: Train mse = 0.5698 | Val mse = 0.9882
Epoch 5: Train mse = 0.5189 | Val mse = 0.9378
Epoch 6: Train mse = 0.4716 | Val mse = 0.9955
Epoch 7: Train mse = 0.4273 | Val mse = 0.9364
Epoch 8: Train mse = 0.3957 | Val mse = 0.8568
Epoch 9: Train mse = 0.3706 | Val mse = 0.9264
Epoch 10: Train mse = 0.3554 | Val mse = 0.8946
Epoch 11: Train mse = 0.3431 | Val mse = 1.1963
Epoch 12: Train mse = 0.3335 | Val mse = 1.1366
Epoch 13: Train mse = 0.3252 | Val mse = 1.0034
Epoch 14: Train mse = 0.3200 | Val mse = 0.9859
Epoch 15: Train mse = 0.3125 | Val mse = 1.1691
Epoch 16: Train mse = 0.3063 | Val mse = 0.9269
Epoch 17: Train mse = 0.2983 | Val mse = 0.9436
Epoch 18: Train mse = 0.2875 | Val mse = 0.9321
Epoch 19: Train mse = 0.2794 | Val mse = 0.8994
Epoch 20: Train mse = 0.2727 | Val mse = 0.8556
Epoch 21: Train mse = 0.2670 | Val mse = 1.0359
Epoch 22: Train mse = 0.2584 | Val mse = 0.8588
Epoch 23: Train mse = 0.2536 | Val mse = 1.0431
Epoch 24: Train mse = 0.2474 | Val mse = 0.9746
Epoch 25: Train mse = 0.2457 | Val mse = 0.9282
Epoch 26: Train mse = 0.2412 | Val mse = 0.9610
Epoch 27: Train mse = 0.2376 | Val mse = 1.0133
Epoch 28: Train mse = 0.2334 | Val mse = 0.9658
Epoch 29: Train mse = 0.2261 | Val mse = 1.0515
Epoch 30: Train mse = 0.2202 | Val mse = 0.9481
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.77694165
1 day(s) RMSE                      : 9.58895289
1 day(s) R2                        : -0.03200565
1 day(s) Pearson r                 : 0.06546580
1 day(s) QLIKE                     : 0.42251283
3 day(s) MAE                       : 2.81694502
3 day(s) RMSE                      : 9.66956987
3 day(s) R2                        : -0.04431403
3 day(s) Pearson r                 : 0.03239317
3 day(s) QLIKE                     : 0.43079135
5 day(s) MAE                       : 2.85472443
5 day(s) RMSE                      : 9.70901185
5 day(s) R2                        : -0.04949925
5 day(s) Pearson r                 : 0.01375720
5 day(s) QLIKE                     : 0.43665767
10 day(s) MAE                      : 2.88060415
10 day(s) RMSE                     : 9.72220655
10 day(s) R2                       : -0.04989743
10 day(s) Pearson r                : -0.01297184
10 day(s) QLIKE                    : 0.44105410
full horizon MAE                   : 2.88060415
full horizon RMSE                  : 9.72220655
full horizon R2                    : -0.04989743
full horizon Pearson r             : -0.01297184
full horizon QLIKE                 : 0.44105410

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/C/Simple_KAN_H10.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.83327, max=3.46701

=== C | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.80914068636715
  Min value:  -5.781033963264048
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.111456003225553
  Min value:  -2.5798794252140986
Checking X_price_val:
Shape: (161, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.316307789702823
  Min value:  -3.321376796599313
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.643530346347736
  Min value:  -1.664384167842813
Checking X_price_test:
Shape: (404, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.453119269597156
  Min value:  -3.4619875412520558
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.1084933809861415
  Min value:  -2.0600067160447444
Epoch 1: Train mse = 0.9689 | Val mse = 0.4977
Epoch 2: Train mse = 0.7691 | Val mse = 0.6931
Epoch 3: Train mse = 0.6687 | Val mse = 1.2204
Epoch 4: Train mse = 0.6067 | Val mse = 1.1469
Epoch 5: Train mse = 0.5584 | Val mse = 0.8185
Epoch 6: Train mse = 0.5161 | Val mse = 0.5392
Epoch 7: Train mse = 0.4671 | Val mse = 0.6711
Epoch 8: Train mse = 0.4340 | Val mse = 0.5831
Epoch 9: Train mse = 0.4072 | Val mse = 0.8267
Epoch 10: Train mse = 0.3882 | Val mse = 0.7627
Epoch 11: Train mse = 0.3697 | Val mse = 0.7614
Epoch 12: Train mse = 0.3582 | Val mse = 0.8653
Epoch 13: Train mse = 0.3472 | Val mse = 0.8639
Epoch 14: Train mse = 0.3395 | Val mse = 0.8501
Epoch 15: Train mse = 0.3327 | Val mse = 0.9073
Epoch 16: Train mse = 0.3243 | Val mse = 0.8720
Epoch 17: Train mse = 0.3184 | Val mse = 0.9416
Epoch 18: Train mse = 0.3135 | Val mse = 0.8608
Epoch 19: Train mse = 0.3086 | Val mse = 0.8853
Epoch 20: Train mse = 0.3039 | Val mse = 0.8199
Epoch 21: Train mse = 0.2986 | Val mse = 0.8661
Epoch 22: Train mse = 0.2917 | Val mse = 0.8122
Epoch 23: Train mse = 0.2857 | Val mse = 0.8362
Epoch 24: Train mse = 0.2780 | Val mse = 0.8154
Epoch 25: Train mse = 0.2728 | Val mse = 0.8950
Epoch 26: Train mse = 0.2676 | Val mse = 0.8521
Epoch 27: Train mse = 0.2616 | Val mse = 0.9180
Epoch 28: Train mse = 0.2574 | Val mse = 0.9415
Epoch 29: Train mse = 0.2506 | Val mse = 0.8795
Epoch 30: Train mse = 0.2444 | Val mse = 0.9014
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.78140564
1 day(s) RMSE                      : 9.59291763
1 day(s) R2                        : -0.03285924
1 day(s) Pearson r                 : 0.05776588
1 day(s) QLIKE                     : 0.42271778
3 day(s) MAE                       : 2.81806295
3 day(s) RMSE                      : 9.65244549
3 day(s) R2                        : -0.04061844
3 day(s) Pearson r                 : 0.03229386
3 day(s) QLIKE                     : 0.43053256
5 day(s) MAE                       : 2.84849761
5 day(s) RMSE                      : 9.66906269
5 day(s) R2                        : -0.04088038
5 day(s) Pearson r                 : 0.01926510
5 day(s) QLIKE                     : 0.43472971
10 day(s) MAE                      : 2.87875176
10 day(s) RMSE                     : 9.68831433
10 day(s) R2                       : -0.04259017
10 day(s) Pearson r                : 0.00166947
10 day(s) QLIKE                    : 0.43814978
20 day(s) MAE                      : 2.90065640
20 day(s) RMSE                     : 9.70582751
20 day(s) R2                       : -0.04553400
20 day(s) Pearson r                : -0.02452333
20 day(s) QLIKE                    : 0.44104095
full horizon MAE                   : 2.90065640
full horizon RMSE                  : 9.70582751
full horizon R2                    : -0.04553400
full horizon Pearson r             : -0.02452333
full horizon QLIKE                 : 0.44104095

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/C/Simple_KAN_H20.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.89499, max=3.39094

=== BTCUSDT | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -9.895452801091551
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.551349524903821
  Min value:  -3.6053341677678
Checking X_price_val:
Shape: (192, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.344819091237707
  Min value:  -4.24132665249135
Checking X_price_test:
Shape: (480, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.58025865739623
  Min value:  -2.56748600221835
Epoch 1: Train mse = 0.9189 | Val mse = 1.0587
Epoch 2: Train mse = 0.7187 | Val mse = 1.4851
Epoch 3: Train mse = 0.6062 | Val mse = 1.3982
Epoch 4: Train mse = 0.5183 | Val mse = 0.9387
Epoch 5: Train mse = 0.4804 | Val mse = 0.9768
Epoch 6: Train mse = 0.4085 | Val mse = 1.6773
Epoch 7: Train mse = 0.3750 | Val mse = 1.0119
Epoch 8: Train mse = 0.3322 | Val mse = 0.9030
Epoch 9: Train mse = 0.3115 | Val mse = 1.0684
Epoch 10: Train mse = 0.2805 | Val mse = 1.3078
Epoch 11: Train mse = 0.2567 | Val mse = 0.9353
Epoch 12: Train mse = 0.2333 | Val mse = 1.3199
Epoch 13: Train mse = 0.2077 | Val mse = 0.8679
Epoch 14: Train mse = 0.2075 | Val mse = 0.9122
Epoch 15: Train mse = 0.1659 | Val mse = 0.8736
Epoch 16: Train mse = 0.1555 | Val mse = 0.9306
Epoch 17: Train mse = 0.1284 | Val mse = 0.9109
Epoch 18: Train mse = 0.1129 | Val mse = 1.1447
Epoch 19: Train mse = 0.0979 | Val mse = 1.4625
Epoch 20: Train mse = 0.0950 | Val mse = 3.3611
Epoch 21: Train mse = 0.0922 | Val mse = 1.4194
Epoch 22: Train mse = 0.0851 | Val mse = 1.3289
Epoch 23: Train mse = 0.0731 | Val mse = 1.1190
Epoch 24: Train mse = 0.0743 | Val mse = 1.6237
Epoch 25: Train mse = 0.0663 | Val mse = 1.2205
Epoch 26: Train mse = 0.0687 | Val mse = 1.1981
Epoch 27: Train mse = 0.0572 | Val mse = 2.0278
Epoch 28: Train mse = 0.0508 | Val mse = 1.7927
Epoch 29: Train mse = 0.0512 | Val mse = 1.3616
Epoch 30: Train mse = 0.0413 | Val mse = 1.8037
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 8.13860075
1 day(s) RMSE                      : 12.57733751
1 day(s) R2                        : -0.87468978
1 day(s) Pearson r                 : 0.02130714
1 day(s) QLIKE                     : 0.85029948
full horizon MAE                   : 8.13860075
full horizon RMSE                  : 12.57733751
full horizon R2                    : -0.87468978
full horizon Pearson r             : 0.02130714
full horizon QLIKE                 : 0.85029948

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BTCUSDT/Simple_KAN_H1.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=0.820745, max=54.7658

=== BTCUSDT | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -9.895452801091551
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.554196812284146
  Min value:  -3.6087233634991427
Checking X_price_val:
Shape: (192, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3459792971639226
  Min value:  -4.245202118935102
Checking X_price_test:
Shape: (480, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.58159887703065
  Min value:  -2.5700816741597805
Epoch 1: Train mse = 0.9677 | Val mse = 6.2523
Epoch 2: Train mse = 0.7805 | Val mse = 1.3760
Epoch 3: Train mse = 0.6701 | Val mse = 1.6338
Epoch 4: Train mse = 0.5894 | Val mse = 1.0724
Epoch 5: Train mse = 0.5146 | Val mse = 1.0500
Epoch 6: Train mse = 0.4678 | Val mse = 1.1254
Epoch 7: Train mse = 0.4228 | Val mse = 1.6355
Epoch 8: Train mse = 0.3853 | Val mse = 1.0510
Epoch 9: Train mse = 0.3534 | Val mse = 1.2063
Epoch 10: Train mse = 0.3325 | Val mse = 1.3071
Epoch 11: Train mse = 0.3256 | Val mse = 1.4087
Epoch 12: Train mse = 0.3128 | Val mse = 1.0918
Epoch 13: Train mse = 0.3033 | Val mse = 1.1759
Epoch 14: Train mse = 0.2979 | Val mse = 1.3591
Epoch 15: Train mse = 0.2857 | Val mse = 1.3379
Epoch 16: Train mse = 0.2812 | Val mse = 1.1855
Epoch 17: Train mse = 0.2693 | Val mse = 1.2081
Epoch 18: Train mse = 0.2541 | Val mse = 1.2354
Epoch 19: Train mse = 0.2438 | Val mse = 1.3161
Epoch 20: Train mse = 0.2311 | Val mse = 1.6134
Epoch 21: Train mse = 0.2163 | Val mse = 1.3484
Epoch 22: Train mse = 0.2090 | Val mse = 1.4041
Epoch 23: Train mse = 0.2035 | Val mse = 1.9105
Epoch 24: Train mse = 0.1945 | Val mse = 2.0005
Epoch 25: Train mse = 0.1844 | Val mse = 1.7955
Epoch 26: Train mse = 0.1766 | Val mse = 1.7927
Epoch 27: Train mse = 0.1773 | Val mse = 1.4153
Epoch 28: Train mse = 0.1711 | Val mse = 1.4700
Epoch 29: Train mse = 0.1599 | Val mse = 1.5141
Epoch 30: Train mse = 0.1566 | Val mse = 1.6103
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 5.82345109
1 day(s) RMSE                      : 10.19799445
1 day(s) R2                        : -0.23248467
1 day(s) Pearson r                 : -0.00185861
1 day(s) QLIKE                     : 0.53901923
3 day(s) MAE                       : 5.75563338
3 day(s) RMSE                      : 10.12990938
3 day(s) R2                        : -0.21591418
3 day(s) Pearson r                 : -0.00802635
3 day(s) QLIKE                     : 0.52295491
5 day(s) MAE                       : 5.69730542
5 day(s) RMSE                      : 10.11771105
5 day(s) R2                        : -0.21336792
5 day(s) Pearson r                 : -0.02628184
5 day(s) QLIKE                     : 0.52210323
full horizon MAE                   : 5.69730542
full horizon RMSE                  : 10.11771105
full horizon R2                    : -0.21336792
full horizon Pearson r             : -0.02628184
full horizon QLIKE                 : 0.52210323

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BTCUSDT/Simple_KAN_H5.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.12485, max=23.8841

=== BTCUSDT | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -9.895452801091551
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.556915178048552
  Min value:  -3.6122923012225283
Checking X_price_val:
Shape: (192, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.346996833643509
  Min value:  -4.249261289934421
Checking X_price_test:
Shape: (480, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.582797894129229
  Min value:  -2.572850621760107
Epoch 1: Train mse = 0.9658 | Val mse = 3.4370
Epoch 2: Train mse = 0.7845 | Val mse = 1.2931
Epoch 3: Train mse = 0.6722 | Val mse = 1.8873
Epoch 4: Train mse = 0.5908 | Val mse = 1.0717
Epoch 5: Train mse = 0.5288 | Val mse = 0.9507
Epoch 6: Train mse = 0.4723 | Val mse = 1.0121
Epoch 7: Train mse = 0.4370 | Val mse = 0.9643
Epoch 8: Train mse = 0.4155 | Val mse = 1.0443
Epoch 9: Train mse = 0.3877 | Val mse = 1.1398
Epoch 10: Train mse = 0.3777 | Val mse = 1.0652
Epoch 11: Train mse = 0.3694 | Val mse = 1.0284
Epoch 12: Train mse = 0.3562 | Val mse = 1.0542
Epoch 13: Train mse = 0.3431 | Val mse = 1.0767
Epoch 14: Train mse = 0.3324 | Val mse = 1.1218
Epoch 15: Train mse = 0.3162 | Val mse = 1.1898
Epoch 16: Train mse = 0.3070 | Val mse = 1.0945
Epoch 17: Train mse = 0.2978 | Val mse = 1.0523
Epoch 18: Train mse = 0.2974 | Val mse = 1.0863
Epoch 19: Train mse = 0.2848 | Val mse = 1.0983
Epoch 20: Train mse = 0.2799 | Val mse = 1.0485
Epoch 21: Train mse = 0.2742 | Val mse = 1.1146
Epoch 22: Train mse = 0.2665 | Val mse = 1.0721
Epoch 23: Train mse = 0.2624 | Val mse = 1.0946
Epoch 24: Train mse = 0.2559 | Val mse = 1.1149
Epoch 25: Train mse = 0.2478 | Val mse = 1.1033
Epoch 26: Train mse = 0.2411 | Val mse = 1.1029
Epoch 27: Train mse = 0.2341 | Val mse = 1.2052
Epoch 28: Train mse = 0.2231 | Val mse = 1.0931
Epoch 29: Train mse = 0.2163 | Val mse = 1.0855
Epoch 30: Train mse = 0.2110 | Val mse = 1.1289
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 6.62188432
1 day(s) RMSE                      : 10.51097248
1 day(s) R2                        : -0.30929582
1 day(s) Pearson r                 : -0.03208937
1 day(s) QLIKE                     : 0.61023793
3 day(s) MAE                       : 6.58596168
3 day(s) RMSE                      : 10.49964708
3 day(s) R2                        : -0.30629482
3 day(s) Pearson r                 : -0.04335959
3 day(s) QLIKE                     : 0.61183922
5 day(s) MAE                       : 6.65578975
5 day(s) RMSE                      : 10.60236257
5 day(s) R2                        : -0.33239584
5 day(s) Pearson r                 : -0.05840230
5 day(s) QLIKE                     : 0.61850224
10 day(s) MAE                      : 6.72108002
10 day(s) RMSE                     : 10.79516623
10 day(s) R2                       : -0.38096322
10 day(s) Pearson r                : -0.08484265
10 day(s) QLIKE                    : 0.65167496
full horizon MAE                   : 6.72108002
full horizon RMSE                  : 10.79516623
full horizon R2                    : -0.38096322
full horizon Pearson r             : -0.08484265
full horizon QLIKE                 : 0.65167496

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BTCUSDT/Simple_KAN_H10.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=1.66643, max=24.4854

=== BTCUSDT | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514246752
  Min value:  -9.895452801091551
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.564043013270673
  Min value:  -3.62021477149946
Checking X_price_val:
Shape: (192, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3500532890780983
  Min value:  -4.258357261790038
Checking X_price_test:
Shape: (480, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  15.53568548101405
  Min value:  -9.62541592027249
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5862887708961586
  Min value:  -2.578858106420128
Epoch 1: Train mse = 0.9458 | Val mse = 4.8254
Epoch 2: Train mse = 0.7860 | Val mse = 1.1387
Epoch 3: Train mse = 0.6754 | Val mse = 1.3654
Epoch 4: Train mse = 0.6001 | Val mse = 1.7691
Epoch 5: Train mse = 0.5405 | Val mse = 1.1357
Epoch 6: Train mse = 0.4982 | Val mse = 1.2155
Epoch 7: Train mse = 0.4747 | Val mse = 1.2795
Epoch 8: Train mse = 0.4565 | Val mse = 1.0976
Epoch 9: Train mse = 0.4231 | Val mse = 1.1916
Epoch 10: Train mse = 0.4059 | Val mse = 1.1063
Epoch 11: Train mse = 0.3941 | Val mse = 1.0082
Epoch 12: Train mse = 0.3809 | Val mse = 1.0510
Epoch 13: Train mse = 0.3677 | Val mse = 1.2347
Epoch 14: Train mse = 0.3551 | Val mse = 1.3544
Epoch 15: Train mse = 0.3415 | Val mse = 1.2495
Epoch 16: Train mse = 0.3319 | Val mse = 1.3614
Epoch 17: Train mse = 0.3188 | Val mse = 1.5409
Epoch 18: Train mse = 0.3193 | Val mse = 1.4874
Epoch 19: Train mse = 0.3136 | Val mse = 1.5253
Epoch 20: Train mse = 0.3055 | Val mse = 1.6405
Epoch 21: Train mse = 0.3038 | Val mse = 1.6023
Epoch 22: Train mse = 0.2966 | Val mse = 1.6163
Epoch 23: Train mse = 0.2911 | Val mse = 1.5803
Epoch 24: Train mse = 0.2860 | Val mse = 1.5717
Epoch 25: Train mse = 0.2819 | Val mse = 1.4083
Epoch 26: Train mse = 0.2757 | Val mse = 1.4354
Epoch 27: Train mse = 0.2720 | Val mse = 1.3041
Epoch 28: Train mse = 0.2646 | Val mse = 1.4788
Epoch 29: Train mse = 0.2602 | Val mse = 1.4300
Epoch 30: Train mse = 0.2565 | Val mse = 1.4941
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 5.69053419
1 day(s) RMSE                      : 10.09117056
1 day(s) R2                        : -0.20679937
1 day(s) Pearson r                 : -0.01686821
1 day(s) QLIKE                     : 0.73154797
3 day(s) MAE                       : 5.74919656
3 day(s) RMSE                      : 10.16137748
3 day(s) R2                        : -0.22348028
3 day(s) Pearson r                 : -0.01178129
3 day(s) QLIKE                     : 0.83362848
5 day(s) MAE                       : 5.87922295
5 day(s) RMSE                      : 10.26529354
5 day(s) R2                        : -0.24902379
5 day(s) Pearson r                 : -0.02503625
5 day(s) QLIKE                     : 0.89063744
10 day(s) MAE                      : 6.61004228
10 day(s) RMSE                     : 11.20251047
10 day(s) R2                       : -0.48714790
10 day(s) Pearson r                : -0.05121636
10 day(s) QLIKE                    : 0.95305829
20 day(s) MAE                      : 10.31175398
20 day(s) RMSE                     : 17.94454262
20 day(s) R2                       : -2.81717106
20 day(s) Pearson r                : -0.04537579
20 day(s) QLIKE                    : 0.99695651
full horizon MAE                   : 10.31175398
full horizon RMSE                  : 17.94454262
full horizon R2                    : -2.81717106
full horizon Pearson r             : -0.04537579
full horizon QLIKE                 : 0.99695651

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/BTCUSDT/Simple_KAN_H20.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=0.435099, max=97.4616

=== EURUSD | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  32.883394272790085
  Min value:  -3.1812809750988515
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9963965101732293
  Min value:  -3.718602223017603
Checking X_price_val:
Shape: (302, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  41.477005683820344
  Min value:  -2.633090998920803
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642504
  Min value:  -2.975928211620611
Checking X_price_test:
Shape: (757, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -2.633090998920803
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316056
  Min value:  -5.349969967608034
Epoch 1: Train mse = 0.9459 | Val mse = 0.8360
Epoch 2: Train mse = 0.7697 | Val mse = 0.7617
Epoch 3: Train mse = 0.5724 | Val mse = 0.7264
Epoch 4: Train mse = 0.4153 | Val mse = 0.6392
Epoch 5: Train mse = 0.3164 | Val mse = 0.6767
Epoch 6: Train mse = 0.3055 | Val mse = 0.5418
Epoch 7: Train mse = 0.2527 | Val mse = 0.5381
Epoch 8: Train mse = 0.2394 | Val mse = 0.5073
Epoch 9: Train mse = 0.2163 | Val mse = 0.4973
Epoch 10: Train mse = 0.2011 | Val mse = 0.4775
Epoch 11: Train mse = 0.1659 | Val mse = 0.4836
Epoch 12: Train mse = 0.1304 | Val mse = 0.5109
Epoch 13: Train mse = 0.1147 | Val mse = 0.5105
Epoch 14: Train mse = 0.1055 | Val mse = 0.4988
Epoch 15: Train mse = 0.0901 | Val mse = 0.5236
Epoch 16: Train mse = 0.0886 | Val mse = 0.5516
Epoch 17: Train mse = 0.0764 | Val mse = 0.5573
Epoch 18: Train mse = 0.0556 | Val mse = 0.5686
Epoch 19: Train mse = 0.0529 | Val mse = 0.5741
Epoch 20: Train mse = 0.0479 | Val mse = 0.5439
Epoch 21: Train mse = 0.0379 | Val mse = 0.5840
Epoch 22: Train mse = 0.0350 | Val mse = 0.5568
Epoch 23: Train mse = 0.0292 | Val mse = 0.5499
Epoch 24: Train mse = 0.0229 | Val mse = 0.5257
Epoch 25: Train mse = 0.0224 | Val mse = 0.5352
Epoch 26: Train mse = 0.0223 | Val mse = 0.5238
Epoch 27: Train mse = 0.0172 | Val mse = 0.5264
Epoch 28: Train mse = 0.0143 | Val mse = 0.5169
Epoch 29: Train mse = 0.0109 | Val mse = 0.5408
Epoch 30: Train mse = 0.0097 | Val mse = 0.5212
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.09491428
1 day(s) RMSE                      : 0.17759531
1 day(s) R2                        : 0.30898839
1 day(s) Pearson r                 : 0.57442306
1 day(s) QLIKE                     : 0.38893317
full horizon MAE                   : 0.09491428
full horizon RMSE                  : 0.17759531
full horizon R2                    : 0.30898839
full horizon Pearson r             : 0.57442306
full horizon QLIKE                 : 0.38893317

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/EURUSD/Simple_KAN_H1.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00485749, max=0.683331

=== EURUSD | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  32.883394272790085
  Min value:  -3.1812809750988515
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9970127449111077
  Min value:  -3.720451582171488
Checking X_price_val:
Shape: (302, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  41.477005683820344
  Min value:  -2.633090998920803
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319427111757353
  Min value:  -2.9775048778703237
Checking X_price_test:
Shape: (757, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -2.633090998920803
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3838234229198263
  Min value:  -5.352418327671095
Epoch 1: Train mse = 0.9411 | Val mse = 0.9225
Epoch 2: Train mse = 0.8420 | Val mse = 0.8960
Epoch 3: Train mse = 0.7137 | Val mse = 0.8350
Epoch 4: Train mse = 0.5723 | Val mse = 0.7393
Epoch 5: Train mse = 0.4703 | Val mse = 0.5958
Epoch 6: Train mse = 0.3415 | Val mse = 0.5983
Epoch 7: Train mse = 0.2796 | Val mse = 0.5707
Epoch 8: Train mse = 0.2439 | Val mse = 0.5406
Epoch 9: Train mse = 0.2137 | Val mse = 0.5143
Epoch 10: Train mse = 0.1918 | Val mse = 0.5300
Epoch 11: Train mse = 0.1731 | Val mse = 0.5599
Epoch 12: Train mse = 0.1539 | Val mse = 0.5394
Epoch 13: Train mse = 0.1429 | Val mse = 0.5588
Epoch 14: Train mse = 0.1303 | Val mse = 0.5631
Epoch 15: Train mse = 0.1162 | Val mse = 0.5572
Epoch 16: Train mse = 0.1032 | Val mse = 0.5684
Epoch 17: Train mse = 0.0952 | Val mse = 0.5893
Epoch 18: Train mse = 0.0900 | Val mse = 0.5946
Epoch 19: Train mse = 0.0809 | Val mse = 0.5732
Epoch 20: Train mse = 0.0739 | Val mse = 0.5945
Epoch 21: Train mse = 0.0691 | Val mse = 0.5812
Epoch 22: Train mse = 0.0606 | Val mse = 0.5809
Epoch 23: Train mse = 0.0542 | Val mse = 0.5938
Epoch 24: Train mse = 0.0492 | Val mse = 0.5887
Epoch 25: Train mse = 0.0441 | Val mse = 0.6032
Epoch 26: Train mse = 0.0399 | Val mse = 0.6002
Epoch 27: Train mse = 0.0376 | Val mse = 0.6090
Epoch 28: Train mse = 0.0344 | Val mse = 0.6110
Epoch 29: Train mse = 0.0303 | Val mse = 0.6045
Epoch 30: Train mse = 0.0272 | Val mse = 0.6093
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.10499429
1 day(s) RMSE                      : 0.17949415
1 day(s) R2                        : 0.29413286
1 day(s) Pearson r                 : 0.54864316
1 day(s) QLIKE                     : 0.41010425
3 day(s) MAE                       : 0.10573220
3 day(s) RMSE                      : 0.18795305
3 day(s) R2                        : 0.22006446
3 day(s) Pearson r                 : 0.47889539
3 day(s) QLIKE                     : 0.47402730
5 day(s) MAE                       : 0.10652936
5 day(s) RMSE                      : 0.18927968
5 day(s) R2                        : 0.20002715
5 day(s) Pearson r                 : 0.46411943
5 day(s) QLIKE                     : 0.51436557
full horizon MAE                   : 0.10652936
full horizon RMSE                  : 0.18927968
full horizon R2                    : 0.20002715
full horizon Pearson r             : 0.46411943
full horizon QLIKE                 : 0.51436557

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/EURUSD/Simple_KAN_H5.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00265448, max=0.942698

=== EURUSD | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  32.883394272790085
  Min value:  -3.1812809750988515
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.997326788566986
  Min value:  -3.719996871136433
Checking X_price_val:
Shape: (302, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  41.477005683820344
  Min value:  -2.633090998920803
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319755344427202
  Min value:  -2.977065724545838
Checking X_price_test:
Shape: (757, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -2.633090998920803
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384150307095678
  Min value:  -5.351929442359286
Epoch 1: Train mse = 0.9569 | Val mse = 1.0307
Epoch 2: Train mse = 0.8781 | Val mse = 0.8811
Epoch 3: Train mse = 0.7903 | Val mse = 0.8662
Epoch 4: Train mse = 0.6464 | Val mse = 0.7907
Epoch 5: Train mse = 0.5335 | Val mse = 0.7208
Epoch 6: Train mse = 0.4160 | Val mse = 0.6152
Epoch 7: Train mse = 0.3265 | Val mse = 0.6118
Epoch 8: Train mse = 0.2705 | Val mse = 0.5728
Epoch 9: Train mse = 0.2379 | Val mse = 0.5968
Epoch 10: Train mse = 0.2122 | Val mse = 0.5843
Epoch 11: Train mse = 0.1966 | Val mse = 0.5778
Epoch 12: Train mse = 0.1839 | Val mse = 0.5702
Epoch 13: Train mse = 0.1756 | Val mse = 0.5900
Epoch 14: Train mse = 0.1702 | Val mse = 0.5793
Epoch 15: Train mse = 0.1596 | Val mse = 0.5835
Epoch 16: Train mse = 0.1480 | Val mse = 0.5771
Epoch 17: Train mse = 0.1415 | Val mse = 0.5870
Epoch 18: Train mse = 0.1354 | Val mse = 0.5739
Epoch 19: Train mse = 0.1296 | Val mse = 0.5890
Epoch 20: Train mse = 0.1235 | Val mse = 0.5853
Epoch 21: Train mse = 0.1181 | Val mse = 0.6008
Epoch 22: Train mse = 0.1130 | Val mse = 0.5975
Epoch 23: Train mse = 0.1029 | Val mse = 0.6108
Epoch 24: Train mse = 0.0982 | Val mse = 0.6086
Epoch 25: Train mse = 0.0888 | Val mse = 0.6154
Epoch 26: Train mse = 0.0796 | Val mse = 0.6266
Epoch 27: Train mse = 0.0752 | Val mse = 0.6241
Epoch 28: Train mse = 0.0657 | Val mse = 0.6472
Epoch 29: Train mse = 0.0628 | Val mse = 0.6408
Epoch 30: Train mse = 0.0582 | Val mse = 0.6627
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11183751
1 day(s) RMSE                      : 0.19050185
1 day(s) R2                        : 0.20490183
1 day(s) Pearson r                 : 0.48022368
1 day(s) QLIKE                     : 0.47743373
3 day(s) MAE                       : 0.11470704
3 day(s) RMSE                      : 0.19613978
3 day(s) R2                        : 0.15064089
3 day(s) Pearson r                 : 0.44405882
3 day(s) QLIKE                     : 0.53790745
5 day(s) MAE                       : 0.11531436
5 day(s) RMSE                      : 0.19692296
5 day(s) R2                        : 0.13411546
5 day(s) Pearson r                 : 0.43731184
5 day(s) QLIKE                     : 0.58499440
10 day(s) MAE                      : 0.11878762
10 day(s) RMSE                     : 0.20179227
10 day(s) R2                       : 0.08208551
10 day(s) Pearson r                : 0.40963187
10 day(s) QLIKE                    : 0.57625799
full horizon MAE                   : 0.11878762
full horizon RMSE                  : 0.20179227
full horizon R2                    : 0.08208551
full horizon Pearson r             : 0.40963187
full horizon QLIKE                 : 0.57625799

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/EURUSD/Simple_KAN_H10.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00293483, max=1.00033

=== EURUSD | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  32.883394272790085
  Min value:  -3.1812809750988515
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.998957169490066
  Min value:  -3.7200073694899434
Checking X_price_val:
Shape: (302, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  41.477005683820344
  Min value:  -2.633090998920803
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3212202110774225
  Min value:  -2.9768947428361368
Checking X_price_test:
Shape: (757, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144347
  Min value:  -2.633090998920803
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737162
  Min value:  -5.3523385822659755
Epoch 1: Train mse = 0.9524 | Val mse = 0.9543
Epoch 2: Train mse = 0.8726 | Val mse = 0.8987
Epoch 3: Train mse = 0.7890 | Val mse = 0.8489
Epoch 4: Train mse = 0.6637 | Val mse = 0.8009
Epoch 5: Train mse = 0.5576 | Val mse = 0.7242
Epoch 6: Train mse = 0.4237 | Val mse = 0.6625
Epoch 7: Train mse = 0.3252 | Val mse = 0.6084
Epoch 8: Train mse = 0.2840 | Val mse = 0.5212
Epoch 9: Train mse = 0.2493 | Val mse = 0.5130
Epoch 10: Train mse = 0.2284 | Val mse = 0.5078
Epoch 11: Train mse = 0.2171 | Val mse = 0.5289
Epoch 12: Train mse = 0.2072 | Val mse = 0.5303
Epoch 13: Train mse = 0.1975 | Val mse = 0.5304
Epoch 14: Train mse = 0.1908 | Val mse = 0.5427
Epoch 15: Train mse = 0.1826 | Val mse = 0.5379
Epoch 16: Train mse = 0.1740 | Val mse = 0.5590
Epoch 17: Train mse = 0.1662 | Val mse = 0.5621
Epoch 18: Train mse = 0.1591 | Val mse = 0.5605
Epoch 19: Train mse = 0.1543 | Val mse = 0.5729
Epoch 20: Train mse = 0.1474 | Val mse = 0.5717
Epoch 21: Train mse = 0.1435 | Val mse = 0.5753
Epoch 22: Train mse = 0.1382 | Val mse = 0.5779
Epoch 23: Train mse = 0.1325 | Val mse = 0.5711
Epoch 24: Train mse = 0.1300 | Val mse = 0.5705
Epoch 25: Train mse = 0.1267 | Val mse = 0.5908
Epoch 26: Train mse = 0.1222 | Val mse = 0.5701
Epoch 27: Train mse = 0.1191 | Val mse = 0.5794
Epoch 28: Train mse = 0.1116 | Val mse = 0.5986
Epoch 29: Train mse = 0.1093 | Val mse = 0.6016
Epoch 30: Train mse = 0.1039 | Val mse = 0.5950
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.12204112
1 day(s) RMSE                      : 0.20650889
1 day(s) R2                        : 0.06567093
1 day(s) Pearson r                 : 0.39679037
1 day(s) QLIKE                     : 0.49416424
3 day(s) MAE                       : 0.12352359
3 day(s) RMSE                      : 0.20620768
3 day(s) R2                        : 0.06120739
3 day(s) Pearson r                 : 0.39463298
3 day(s) QLIKE                     : 0.51977512
5 day(s) MAE                       : 0.12439277
5 day(s) RMSE                      : 0.20655070
5 day(s) R2                        : 0.04737798
5 day(s) Pearson r                 : 0.38707727
5 day(s) QLIKE                     : 0.53100790
10 day(s) MAE                      : 0.12520048
10 day(s) RMSE                     : 0.20732000
10 day(s) R2                       : 0.03110753
10 day(s) Pearson r                : 0.37850840
10 day(s) QLIKE                    : 0.53271209
20 day(s) MAE                      : 0.12970765
20 day(s) RMSE                     : 0.20972222
20 day(s) R2                       : -0.00316927
20 day(s) Pearson r                : 0.37101763
20 day(s) QLIKE                    : 0.54586285
full horizon MAE                   : 0.12970765
full horizon RMSE                  : 0.20972222
full horizon R2                    : -0.00316927
full horizon Pearson r             : 0.37101763
full horizon QLIKE                 : 0.54586285

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/EURUSD/Simple_KAN_H20.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00379213, max=0.880942

=== GOLD | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  24.476276295980508
  Min value:  -4.11251005137408
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.28323490000094
  Min value:  -5.250298730157779
Checking X_price_val:
Shape: (443, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -4.11251005137408
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7902797110229713
  Min value:  -2.119473437475616
Checking X_price_test:
Shape: (1107, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.11251005137408
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5405778347636028
  Min value:  -2.3606736150182797
Epoch 1: Train mse = 0.9784 | Val mse = 0.4572
Epoch 2: Train mse = 0.8045 | Val mse = 0.7247
Epoch 3: Train mse = 0.6376 | Val mse = 0.4796
Epoch 4: Train mse = 0.4392 | Val mse = 0.4131
Epoch 5: Train mse = 0.3412 | Val mse = 0.4602
Epoch 6: Train mse = 0.3062 | Val mse = 0.3424
Epoch 7: Train mse = 0.2533 | Val mse = 0.3736
Epoch 8: Train mse = 0.2235 | Val mse = 0.3980
Epoch 9: Train mse = 0.1953 | Val mse = 0.3878
Epoch 10: Train mse = 0.1777 | Val mse = 0.3359
Epoch 11: Train mse = 0.1515 | Val mse = 0.3563
Epoch 12: Train mse = 0.1231 | Val mse = 0.3724
Epoch 13: Train mse = 0.1011 | Val mse = 0.3815
Epoch 14: Train mse = 0.0881 | Val mse = 0.3751
Epoch 15: Train mse = 0.0776 | Val mse = 0.3500
Epoch 16: Train mse = 0.0688 | Val mse = 0.3938
Epoch 17: Train mse = 0.0593 | Val mse = 0.4051
Epoch 18: Train mse = 0.0544 | Val mse = 0.3742
Epoch 19: Train mse = 0.0500 | Val mse = 0.3877
Epoch 20: Train mse = 0.0464 | Val mse = 0.4668
Epoch 21: Train mse = 0.0412 | Val mse = 0.3768
Epoch 22: Train mse = 0.0330 | Val mse = 0.3312
Epoch 23: Train mse = 0.0278 | Val mse = 0.3658
Epoch 24: Train mse = 0.0263 | Val mse = 0.3383
Epoch 25: Train mse = 0.0223 | Val mse = 0.3207
Epoch 26: Train mse = 0.0209 | Val mse = 0.3694
Epoch 27: Train mse = 0.0186 | Val mse = 0.3008
Epoch 28: Train mse = 0.0151 | Val mse = 0.4344
Epoch 29: Train mse = 0.0128 | Val mse = 0.3296
Epoch 30: Train mse = 0.0110 | Val mse = 0.3886
Epoch 31: Train mse = 0.0098 | Val mse = 0.3715
Epoch 32: Train mse = 0.0083 | Val mse = 0.3299
Epoch 33: Train mse = 0.0063 | Val mse = 0.3950
Epoch 34: Train mse = 0.0058 | Val mse = 0.3434
Epoch 35: Train mse = 0.0047 | Val mse = 0.3443
Epoch 36: Train mse = 0.0037 | Val mse = 0.3667
Epoch 37: Train mse = 0.0035 | Val mse = 0.3648
Early stopping triggered at epoch 37.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.38676369
1 day(s) RMSE                      : 0.66744985
1 day(s) R2                        : -0.03737743
1 day(s) Pearson r                 : 0.31833814
1 day(s) QLIKE                     : 0.94085391
full horizon MAE                   : 0.38676369
full horizon RMSE                  : 0.66744985
full horizon R2                    : -0.03737743
full horizon Pearson r             : 0.31833814
full horizon QLIKE                 : 0.94085391

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GOLD/Simple_KAN_H1.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00151417, max=4.90651

=== GOLD | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  24.476276295980508
  Min value:  -4.11251005137408
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.283677745863046
  Min value:  -5.250623802304629
Checking X_price_val:
Shape: (443, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -4.11251005137408
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7906723083316343
  Min value:  -2.1194793742414912
Checking X_price_test:
Shape: (1107, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.11251005137408
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5409449791338357
  Min value:  -2.3607041381163065
Epoch 1: Train mse = 0.9525 | Val mse = 0.4721
Epoch 2: Train mse = 0.8112 | Val mse = 0.8140
Epoch 3: Train mse = 0.6319 | Val mse = 0.6563
Epoch 4: Train mse = 0.4541 | Val mse = 0.6282
Epoch 5: Train mse = 0.3632 | Val mse = 0.5353
Epoch 6: Train mse = 0.2906 | Val mse = 0.5832
Epoch 7: Train mse = 0.2380 | Val mse = 0.6210
Epoch 8: Train mse = 0.2086 | Val mse = 0.6716
Epoch 9: Train mse = 0.1781 | Val mse = 0.5720
Epoch 10: Train mse = 0.1600 | Val mse = 0.5357
Epoch 11: Train mse = 0.1378 | Val mse = 0.5577
Epoch 12: Train mse = 0.1202 | Val mse = 0.5714
Epoch 13: Train mse = 0.1147 | Val mse = 0.6073
Epoch 14: Train mse = 0.1019 | Val mse = 0.6614
Epoch 15: Train mse = 0.0929 | Val mse = 0.5657
Epoch 16: Train mse = 0.0842 | Val mse = 0.5773
Epoch 17: Train mse = 0.0758 | Val mse = 0.5916
Epoch 18: Train mse = 0.0713 | Val mse = 0.5440
Epoch 19: Train mse = 0.0651 | Val mse = 0.5866
Epoch 20: Train mse = 0.0605 | Val mse = 0.5377
Epoch 21: Train mse = 0.0595 | Val mse = 0.7307
Epoch 22: Train mse = 0.0631 | Val mse = 0.5537
Epoch 23: Train mse = 0.0585 | Val mse = 0.5791
Epoch 24: Train mse = 0.0539 | Val mse = 0.5696
Epoch 25: Train mse = 0.0500 | Val mse = 0.5718
Epoch 26: Train mse = 0.0454 | Val mse = 0.5980
Epoch 27: Train mse = 0.0425 | Val mse = 0.6870
Epoch 28: Train mse = 0.0378 | Val mse = 0.6056
Epoch 29: Train mse = 0.0334 | Val mse = 0.5737
Epoch 30: Train mse = 0.0295 | Val mse = 0.6576
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.43261017
1 day(s) RMSE                      : 0.72301273
1 day(s) R2                        : -0.21728257
1 day(s) Pearson r                 : -0.00454430
1 day(s) QLIKE                     : 0.60429656
3 day(s) MAE                       : 0.43293494
3 day(s) RMSE                      : 0.72236446
3 day(s) R2                        : -0.21408600
3 day(s) Pearson r                 : 0.00375034
3 day(s) QLIKE                     : 0.66262816
5 day(s) MAE                       : 0.43836621
5 day(s) RMSE                      : 0.72398674
5 day(s) R2                        : -0.21607568
5 day(s) Pearson r                 : 0.03062642
5 day(s) QLIKE                     : 0.70005148
full horizon MAE                   : 0.43836621
full horizon RMSE                  : 0.72398674
full horizon R2                    : -0.21607568
full horizon Pearson r             : 0.03062642
full horizon QLIKE                 : 0.70005148

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GOLD/Simple_KAN_H5.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0887005, max=1.09069

=== GOLD | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  24.476276295980508
  Min value:  -4.11251005137408
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2851543622719985
  Min value:  -5.254244390683224
Checking X_price_val:
Shape: (443, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -4.11251005137408
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7918153902171272
  Min value:  -2.1209816396053798
Checking X_price_test:
Shape: (1107, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.11251005137408
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5419191122017333
  Min value:  -2.3623696000304566
Epoch 1: Train mse = 0.9589 | Val mse = 0.4969
Epoch 2: Train mse = 0.8574 | Val mse = 0.5821
Epoch 3: Train mse = 0.6787 | Val mse = 0.6394
Epoch 4: Train mse = 0.5145 | Val mse = 0.5872
Epoch 5: Train mse = 0.3981 | Val mse = 0.5317
Epoch 6: Train mse = 0.3156 | Val mse = 0.5342
Epoch 7: Train mse = 0.2587 | Val mse = 0.5327
Epoch 8: Train mse = 0.2275 | Val mse = 0.5322
Epoch 9: Train mse = 0.2015 | Val mse = 0.6051
Epoch 10: Train mse = 0.1746 | Val mse = 0.5668
Epoch 11: Train mse = 0.1521 | Val mse = 0.6129
Epoch 12: Train mse = 0.1420 | Val mse = 0.5687
Epoch 13: Train mse = 0.1331 | Val mse = 0.6146
Epoch 14: Train mse = 0.1212 | Val mse = 0.6169
Epoch 15: Train mse = 0.1150 | Val mse = 0.5946
Epoch 16: Train mse = 0.1081 | Val mse = 0.6037
Epoch 17: Train mse = 0.1038 | Val mse = 0.5944
Epoch 18: Train mse = 0.1001 | Val mse = 0.6333
Epoch 19: Train mse = 0.0960 | Val mse = 0.5982
Epoch 20: Train mse = 0.0893 | Val mse = 0.6201
Epoch 21: Train mse = 0.0866 | Val mse = 0.5753
Epoch 22: Train mse = 0.0816 | Val mse = 0.6402
Epoch 23: Train mse = 0.0761 | Val mse = 0.5879
Epoch 24: Train mse = 0.0714 | Val mse = 0.6175
Epoch 25: Train mse = 0.0672 | Val mse = 0.6377
Epoch 26: Train mse = 0.0658 | Val mse = 0.6018
Epoch 27: Train mse = 0.0621 | Val mse = 0.5868
Epoch 28: Train mse = 0.0582 | Val mse = 0.6277
Epoch 29: Train mse = 0.0550 | Val mse = 0.5891
Epoch 30: Train mse = 0.0512 | Val mse = 0.6193
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.44191034
1 day(s) RMSE                      : 0.73093938
1 day(s) R2                        : -0.24411991
1 day(s) Pearson r                 : 0.02979442
1 day(s) QLIKE                     : 0.56731346
3 day(s) MAE                       : 0.44417002
3 day(s) RMSE                      : 0.73278460
3 day(s) R2                        : -0.24936512
3 day(s) Pearson r                 : 0.03331294
3 day(s) QLIKE                     : 0.60051266
5 day(s) MAE                       : 0.45005571
5 day(s) RMSE                      : 0.73767078
5 day(s) R2                        : -0.26248013
5 day(s) Pearson r                 : 0.04255246
5 day(s) QLIKE                     : 0.62789751
10 day(s) MAE                      : 0.45072328
10 day(s) RMSE                     : 0.74178540
10 day(s) R2                       : -0.26236337
10 day(s) Pearson r                : 0.03562448
10 day(s) QLIKE                    : 0.62136987
full horizon MAE                   : 0.45072328
full horizon RMSE                  : 0.74178540
full horizon R2                    : -0.26236337
full horizon Pearson r             : 0.03562448
full horizon QLIKE                 : 0.62136987

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GOLD/Simple_KAN_H10.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.121399, max=0.825749

=== GOLD | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  24.476276295980508
  Min value:  -4.11251005137408
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.2870228653696634
  Min value:  -5.257736685381005
Checking X_price_val:
Shape: (443, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722397423
  Min value:  -4.11251005137408
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.793333110638287
  Min value:  -2.1222460659281195
Checking X_price_test:
Shape: (1107, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698434
  Min value:  -4.11251005137408
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5432591469134391
  Min value:  -2.3638056623268993
Epoch 1: Train mse = 0.9766 | Val mse = 0.4948
Epoch 2: Train mse = 0.8864 | Val mse = 0.6731
Epoch 3: Train mse = 0.7294 | Val mse = 0.5712
Epoch 4: Train mse = 0.5773 | Val mse = 0.6383
Epoch 5: Train mse = 0.4666 | Val mse = 0.6148
Epoch 6: Train mse = 0.3710 | Val mse = 0.6312
Epoch 7: Train mse = 0.3175 | Val mse = 0.5935
Epoch 8: Train mse = 0.2746 | Val mse = 0.6203
Epoch 9: Train mse = 0.2331 | Val mse = 0.6467
Epoch 10: Train mse = 0.2072 | Val mse = 0.6262
Epoch 11: Train mse = 0.1859 | Val mse = 0.5922
Epoch 12: Train mse = 0.1712 | Val mse = 0.6188
Epoch 13: Train mse = 0.1723 | Val mse = 0.6207
Epoch 14: Train mse = 0.1675 | Val mse = 0.6273
Epoch 15: Train mse = 0.1580 | Val mse = 0.6050
Epoch 16: Train mse = 0.1494 | Val mse = 0.6433
Epoch 17: Train mse = 0.1408 | Val mse = 0.5747
Epoch 18: Train mse = 0.1357 | Val mse = 0.6503
Epoch 19: Train mse = 0.1315 | Val mse = 0.5805
Epoch 20: Train mse = 0.1260 | Val mse = 0.5947
Epoch 21: Train mse = 0.1212 | Val mse = 0.5726
Epoch 22: Train mse = 0.1165 | Val mse = 0.5743
Epoch 23: Train mse = 0.1131 | Val mse = 0.6246
Epoch 24: Train mse = 0.1110 | Val mse = 0.5669
Epoch 25: Train mse = 0.1097 | Val mse = 0.6082
Epoch 26: Train mse = 0.1036 | Val mse = 0.6203
Epoch 27: Train mse = 0.0979 | Val mse = 0.6229
Epoch 28: Train mse = 0.0964 | Val mse = 0.5830
Epoch 29: Train mse = 0.0925 | Val mse = 0.5905
Epoch 30: Train mse = 0.0870 | Val mse = 0.6088
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.43970117
1 day(s) RMSE                      : 0.72863810
1 day(s) R2                        : -0.23629828
1 day(s) Pearson r                 : 0.01155985
1 day(s) QLIKE                     : 0.57540255
3 day(s) MAE                       : 0.44337336
3 day(s) RMSE                      : 0.73552009
3 day(s) R2                        : -0.25871032
3 day(s) Pearson r                 : -0.00536045
3 day(s) QLIKE                     : 0.60369680
5 day(s) MAE                       : 0.45063159
5 day(s) RMSE                      : 0.74342924
5 day(s) R2                        : -0.28226760
5 day(s) Pearson r                 : -0.01386635
5 day(s) QLIKE                     : 0.61461152
10 day(s) MAE                      : 0.45420411
10 day(s) RMSE                     : 0.74866485
10 day(s) R2                       : -0.28588669
10 day(s) Pearson r                : -0.01666642
10 day(s) QLIKE                    : 0.63781097
20 day(s) MAE                      : 0.46120219
20 day(s) RMSE                     : 0.75868553
20 day(s) R2                       : -0.30104890
20 day(s) Pearson r                : -0.03180900
20 day(s) QLIKE                    : 0.64321047
full horizon MAE                   : 0.46120219
full horizon RMSE                  : 0.75868553
full horizon R2                    : -0.30104890
full horizon Pearson r             : -0.03180900
full horizon QLIKE                 : 0.64321047

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/GOLD/Simple_KAN_H20.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0981536, max=0.778884

=== SP500 | H=1 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 1
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -4.314392989382238
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.598103080699805
  Min value:  -18.186962127218408
Checking X_price_val:
Shape: (277, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537131
  Min value:  -4.286988967386013
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3850009093345803
  Min value:  -2.4181466917899535
Checking X_price_test:
Shape: (694, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -4.286988967386013
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 1, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3795163284544953
  Min value:  -3.2441546953028633
Epoch 1: Train mse = 0.8908 | Val mse = 0.8763
Epoch 2: Train mse = 0.7782 | Val mse = 0.8084
Epoch 3: Train mse = 0.6866 | Val mse = 0.9164
Epoch 4: Train mse = 0.6588 | Val mse = 0.7592
Epoch 5: Train mse = 0.6028 | Val mse = 0.7244
Epoch 6: Train mse = 0.5428 | Val mse = 0.6834
Epoch 7: Train mse = 0.4798 | Val mse = 0.6800
Epoch 8: Train mse = 0.4355 | Val mse = 0.6871
Epoch 9: Train mse = 0.3652 | Val mse = 0.6724
Epoch 10: Train mse = 0.3424 | Val mse = 0.6625
Epoch 11: Train mse = 0.4242 | Val mse = 0.5855
Epoch 12: Train mse = 0.3358 | Val mse = 0.6492
Epoch 13: Train mse = 0.2728 | Val mse = 0.6278
Epoch 14: Train mse = 0.2629 | Val mse = 0.6348
Epoch 15: Train mse = 0.2501 | Val mse = 0.6118
Epoch 16: Train mse = 0.2294 | Val mse = 0.6093
Epoch 17: Train mse = 0.1921 | Val mse = 0.6199
Epoch 18: Train mse = 0.1607 | Val mse = 0.6730
Epoch 19: Train mse = 0.1336 | Val mse = 0.6512
Epoch 20: Train mse = 0.1171 | Val mse = 0.6471
Epoch 21: Train mse = 0.1052 | Val mse = 0.6550
Epoch 22: Train mse = 0.0955 | Val mse = 0.6393
Epoch 23: Train mse = 0.0961 | Val mse = 0.6208
Epoch 24: Train mse = 0.0839 | Val mse = 0.6702
Epoch 25: Train mse = 0.0689 | Val mse = 0.6491
Epoch 26: Train mse = 0.0714 | Val mse = 0.6441
Epoch 27: Train mse = 0.0712 | Val mse = 0.6205
Epoch 28: Train mse = 0.0627 | Val mse = 0.6586
Epoch 29: Train mse = 0.0606 | Val mse = 0.6550
Epoch 30: Train mse = 0.0607 | Val mse = 0.6676
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 1
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.62289648
1 day(s) RMSE                      : 2.31978380
1 day(s) R2                        : 0.32886702
1 day(s) Pearson r                 : 0.61070392
1 day(s) QLIKE                     : 0.82294709
full horizon MAE                   : 0.62289648
full horizon RMSE                  : 2.31978380
full horizon R2                    : 0.32886702
full horizon Pearson r             : 0.61070392
full horizon QLIKE                 : 0.82294709

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/SP500/Simple_KAN_H1.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0213968, max=13.8523

=== SP500 | H=5 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 5
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -4.314392989382238
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.596907486751884
  Min value:  -18.180099645819357
Checking X_price_val:
Shape: (277, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537131
  Min value:  -4.286988967386013
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384254029613913
  Min value:  -2.417116935778614
Checking X_price_test:
Shape: (694, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -4.286988967386013
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 5, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3784015875360525
  Min value:  -3.2428194072829677
Epoch 1: Train mse = 0.9361 | Val mse = 0.9957
Epoch 2: Train mse = 0.8169 | Val mse = 0.8201
Epoch 3: Train mse = 0.7190 | Val mse = 0.8749
Epoch 4: Train mse = 0.6708 | Val mse = 0.8321
Epoch 5: Train mse = 0.6311 | Val mse = 0.8035
Epoch 6: Train mse = 0.5932 | Val mse = 0.8114
Epoch 7: Train mse = 0.5594 | Val mse = 0.8312
Epoch 8: Train mse = 0.5406 | Val mse = 0.8448
Epoch 9: Train mse = 0.5248 | Val mse = 0.8343
Epoch 10: Train mse = 0.5125 | Val mse = 0.8385
Epoch 11: Train mse = 0.5060 | Val mse = 0.8241
Epoch 12: Train mse = 0.4982 | Val mse = 0.8376
Epoch 13: Train mse = 0.4906 | Val mse = 0.8404
Epoch 14: Train mse = 0.4852 | Val mse = 0.8650
Epoch 15: Train mse = 0.4827 | Val mse = 0.8399
Epoch 16: Train mse = 0.4658 | Val mse = 0.8324
Epoch 17: Train mse = 0.4411 | Val mse = 0.8371
Epoch 18: Train mse = 0.4649 | Val mse = 0.8082
Epoch 19: Train mse = 0.4412 | Val mse = 0.8415
Epoch 20: Train mse = 0.4444 | Val mse = 0.8566
Epoch 21: Train mse = 0.4096 | Val mse = 0.8062
Epoch 22: Train mse = 0.3951 | Val mse = 0.8017
Epoch 23: Train mse = 0.3737 | Val mse = 0.7897
Epoch 24: Train mse = 0.3519 | Val mse = 0.8465
Epoch 25: Train mse = 0.3547 | Val mse = 0.8191
Epoch 26: Train mse = 0.3332 | Val mse = 0.8089
Epoch 27: Train mse = 0.2995 | Val mse = 0.7937
Epoch 28: Train mse = 0.2826 | Val mse = 0.8056
Epoch 29: Train mse = 0.2542 | Val mse = 0.8447
Epoch 30: Train mse = 0.2545 | Val mse = 0.7772
Epoch 31: Train mse = 0.2417 | Val mse = 0.7948
Epoch 32: Train mse = 0.2263 | Val mse = 0.7456
Epoch 33: Train mse = 0.1993 | Val mse = 0.8570
Epoch 34: Train mse = 0.1721 | Val mse = 0.7951
Epoch 35: Train mse = 0.1551 | Val mse = 0.7959
Epoch 36: Train mse = 0.1445 | Val mse = 0.8074
Epoch 37: Train mse = 0.1279 | Val mse = 0.8564
Epoch 38: Train mse = 0.1195 | Val mse = 0.8855
Epoch 39: Train mse = 0.1140 | Val mse = 0.8472
Epoch 40: Train mse = 0.1134 | Val mse = 0.8272
Epoch 41: Train mse = 0.1131 | Val mse = 0.8438
Epoch 42: Train mse = 0.1287 | Val mse = 0.8360
Early stopping triggered at epoch 42.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 5
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.76411568
1 day(s) RMSE                      : 2.69848498
1 day(s) R2                        : 0.09185841
1 day(s) Pearson r                 : 0.35945335
1 day(s) QLIKE                     : 1.10441525
3 day(s) MAE                       : 0.79637018
3 day(s) RMSE                      : 2.74813004
3 day(s) R2                        : 0.05818171
3 day(s) Pearson r                 : 0.25270709
3 day(s) QLIKE                     : 1.16567777
5 day(s) MAE                       : 0.81248715
5 day(s) RMSE                      : 2.78149061
5 day(s) R2                        : 0.03523203
5 day(s) Pearson r                 : 0.18920904
5 day(s) QLIKE                     : 1.14960550
full horizon MAE                   : 0.81248715
full horizon RMSE                  : 2.78149061
full horizon R2                    : 0.03523203
full horizon Pearson r             : 0.18920904
full horizon QLIKE                 : 1.14960550

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/SP500/Simple_KAN_H5.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0114537, max=7.08836

=== SP500 | H=10 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 10
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -4.314392989382238
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.596947126941468
  Min value:  -18.18335453470548
Checking X_price_val:
Shape: (277, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537131
  Min value:  -4.286988967386013
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.384110213828625
  Min value:  -2.417987125769155
Checking X_price_test:
Shape: (694, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -4.286988967386013
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 10, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.378408171115341
  Min value:  -3.2438145134653293
Epoch 1: Train mse = 0.9520 | Val mse = 0.9979
Epoch 2: Train mse = 0.8281 | Val mse = 0.8288
Epoch 3: Train mse = 0.7498 | Val mse = 0.8393
Epoch 4: Train mse = 0.7015 | Val mse = 0.8030
Epoch 5: Train mse = 0.6614 | Val mse = 0.8254
Epoch 6: Train mse = 0.6267 | Val mse = 0.8429
Epoch 7: Train mse = 0.5972 | Val mse = 0.8580
Epoch 8: Train mse = 0.5719 | Val mse = 0.8593
Epoch 9: Train mse = 0.5560 | Val mse = 0.8283
Epoch 10: Train mse = 0.5514 | Val mse = 0.8443
Epoch 11: Train mse = 0.5470 | Val mse = 0.8694
Epoch 12: Train mse = 0.5385 | Val mse = 0.8480
Epoch 13: Train mse = 0.5280 | Val mse = 0.8683
Epoch 14: Train mse = 0.5155 | Val mse = 0.8499
Epoch 15: Train mse = 0.5029 | Val mse = 0.8337
Epoch 16: Train mse = 0.4908 | Val mse = 0.8486
Epoch 17: Train mse = 0.4786 | Val mse = 0.8486
Epoch 18: Train mse = 0.4719 | Val mse = 0.8594
Epoch 19: Train mse = 0.4598 | Val mse = 0.8593
Epoch 20: Train mse = 0.4528 | Val mse = 0.8353
Epoch 21: Train mse = 0.4415 | Val mse = 0.8501
Epoch 22: Train mse = 0.4317 | Val mse = 0.8526
Epoch 23: Train mse = 0.4251 | Val mse = 0.8419
Epoch 24: Train mse = 0.4043 | Val mse = 0.8016
Epoch 25: Train mse = 0.3871 | Val mse = 0.7865
Epoch 26: Train mse = 0.3804 | Val mse = 0.8240
Epoch 27: Train mse = 0.3466 | Val mse = 0.8679
Epoch 28: Train mse = 0.3220 | Val mse = 0.8431
Epoch 29: Train mse = 0.2977 | Val mse = 0.8468
Epoch 30: Train mse = 0.2772 | Val mse = 0.8379
Epoch 31: Train mse = 0.2646 | Val mse = 0.7500
Epoch 32: Train mse = 0.2552 | Val mse = 0.8015
Epoch 33: Train mse = 0.2637 | Val mse = 0.7646
Epoch 34: Train mse = 0.2429 | Val mse = 0.8006
Epoch 35: Train mse = 0.2186 | Val mse = 0.8439
Epoch 36: Train mse = 0.1988 | Val mse = 0.8469
Epoch 37: Train mse = 0.1776 | Val mse = 0.8368
Epoch 38: Train mse = 0.1661 | Val mse = 0.8223
Epoch 39: Train mse = 0.1640 | Val mse = 0.8134
Epoch 40: Train mse = 0.1458 | Val mse = 0.8044
Epoch 41: Train mse = 0.1295 | Val mse = 0.8171
Early stopping triggered at epoch 41.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 10
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.97352393
1 day(s) RMSE                      : 2.67704128
1 day(s) R2                        : 0.10623428
1 day(s) Pearson r                 : 0.34506264
1 day(s) QLIKE                     : 1.10281049
3 day(s) MAE                       : 0.93998728
3 day(s) RMSE                      : 2.72166340
3 day(s) R2                        : 0.07623525
3 day(s) Pearson r                 : 0.28456273
3 day(s) QLIKE                     : 1.18166441
5 day(s) MAE                       : 0.87641001
5 day(s) RMSE                      : 2.75760139
5 day(s) R2                        : 0.05173294
5 day(s) Pearson r                 : 0.22862445
5 day(s) QLIKE                     : 1.09906847
10 day(s) MAE                      : 0.88862985
10 day(s) RMSE                     : 2.82881823
10 day(s) R2                       : 0.00225638
10 day(s) Pearson r                : 0.11271233
10 day(s) QLIKE                    : 1.30865029
full horizon MAE                   : 0.88862985
full horizon RMSE                  : 2.82881823
full horizon R2                    : 0.00225638
full horizon Pearson r             : 0.11271233
full horizon QLIKE                 : 1.30865029

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/SP500/Simple_KAN_H10.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0175449, max=7.86207

=== SP500 | H=20 | Simple_KAN (simple fit) | no_tasks=1 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 20
Features for y: 1

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790614
  Min value:  -4.314392989382238
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.5963112311058607
  Min value:  -18.187442810157716
Checking X_price_val:
Shape: (277, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537131
  Min value:  -4.286988967386013
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3832820721048678
  Min value:  -2.446834413184202
Checking X_price_test:
Shape: (694, 60, 7)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.658768610066629
  Min value:  -4.286988967386013
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 20, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.377737634830384
  Min value:  -3.2455347334033506
Epoch 1: Train mse = 0.9629 | Val mse = 1.0620
Epoch 2: Train mse = 0.8668 | Val mse = 0.8460
Epoch 3: Train mse = 0.7696 | Val mse = 0.8445
Epoch 4: Train mse = 0.7263 | Val mse = 0.8153
Epoch 5: Train mse = 0.6806 | Val mse = 0.8712
Epoch 6: Train mse = 0.6451 | Val mse = 0.8437
Epoch 7: Train mse = 0.6207 | Val mse = 0.8691
Epoch 8: Train mse = 0.6055 | Val mse = 0.8659
Epoch 9: Train mse = 0.5934 | Val mse = 0.8665
Epoch 10: Train mse = 0.5821 | Val mse = 0.8730
Epoch 11: Train mse = 0.5648 | Val mse = 0.8869
Epoch 12: Train mse = 0.5604 | Val mse = 0.8649
Epoch 13: Train mse = 0.5536 | Val mse = 0.8866
Epoch 14: Train mse = 0.5462 | Val mse = 0.8971
Epoch 15: Train mse = 0.5376 | Val mse = 0.8937
Epoch 16: Train mse = 0.5303 | Val mse = 0.9095
Epoch 17: Train mse = 0.5205 | Val mse = 0.8949
Epoch 18: Train mse = 0.5116 | Val mse = 0.9077
Epoch 19: Train mse = 0.5042 | Val mse = 0.9151
Epoch 20: Train mse = 0.4981 | Val mse = 0.9253
Epoch 21: Train mse = 0.4918 | Val mse = 0.9183
Epoch 22: Train mse = 0.4876 | Val mse = 0.8835
Epoch 23: Train mse = 0.4767 | Val mse = 0.8970
Epoch 24: Train mse = 0.4686 | Val mse = 0.8912
Epoch 25: Train mse = 0.4659 | Val mse = 0.8799
Epoch 26: Train mse = 0.4513 | Val mse = 0.8766
Epoch 27: Train mse = 0.4362 | Val mse = 0.8940
Epoch 28: Train mse = 0.4251 | Val mse = 0.9264
Epoch 29: Train mse = 0.4168 | Val mse = 0.8710
Epoch 30: Train mse = 0.4029 | Val mse = 0.8455
Early stopping triggered at epoch 30.

Parameters used in the single-fit model:
input_dim: 420
output_dim: 20
hidden_layers: 3
dropout: 0.00000000
lr: 0.00500000
epochs: 50
batch_size: 512
device: mps
verbose: True
no_tasks: 1
knots: 10
spline_power: 5
l2_weight: 0.00001000
hidden_dim: 128
patience: 10
min_epochs: 30
min_delta: 0.00010000
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.67130056
1 day(s) RMSE                      : 2.67292606
1 day(s) R2                        : 0.10898001
1 day(s) Pearson r                 : 0.44859876
1 day(s) QLIKE                     : 0.81602564
3 day(s) MAE                       : 0.69894441
3 day(s) RMSE                      : 2.70992888
3 day(s) R2                        : 0.08418374
3 day(s) Pearson r                 : 0.35032378
3 day(s) QLIKE                     : 0.87512684
5 day(s) MAE                       : 0.70697149
5 day(s) RMSE                      : 2.75351676
5 day(s) R2                        : 0.05454006
5 day(s) Pearson r                 : 0.25801164
5 day(s) QLIKE                     : 0.93956371
10 day(s) MAE                      : 0.72860408
10 day(s) RMSE                     : 2.80996414
10 day(s) R2                       : 0.01551199
10 day(s) Pearson r                : 0.13491425
10 day(s) QLIKE                    : 1.08517304
20 day(s) MAE                      : 0.73490099
20 day(s) RMSE                     : 2.82798793
20 day(s) R2                       : 0.00296030
20 day(s) Pearson r                : 0.08861047
20 day(s) QLIKE                    : 1.07393316
full horizon MAE                   : 0.73490099
full horizon RMSE                  : 2.82798793
full horizon R2                    : 0.00296030
full horizon Pearson r             : 0.08861047
full horizon QLIKE                 : 1.07393316

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/multi_input_results_saved_object/SP500/Simple_KAN_H20.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.19931, max=5.64792
In [824]:
# Build the per-ticker metric tables for the multi-input runs (full-horizon
# slice only) and persist them twice: machine-readable CSV + human-readable TXT.
metrics_csv_path = "results/metrics_multiple_inputs_all.csv"
metrics_txt_path = "results/metrics_multiple_inputs_all.txt"

multiple_input_frames = build_metric_frames(
    multi_input_results_store,
    outer_horizon="full",
    pretty_print=True,
)
export_metrics(multi_input_results_store, metrics_csv_path, outer_horizon="full")
export_metrics(multi_input_results_store, metrics_txt_path, outer_horizon="full")
=== AAPL | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.462510  2.523389  2.565542  2.606932
Pearson r -0.090941  0.008225 -0.046882 -0.104006
QLIKE      0.575769  0.557520  0.577154  0.595907
R2        -0.063777 -0.058242 -0.059858 -0.061696
RMSE       8.913924  9.195842  9.240946  9.268690

=== MSFT | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.280823  2.342318  2.251885  2.130082
Pearson r  0.110140  0.086747  0.030856 -0.047103
QLIKE      0.443757  0.479544  0.488425  0.524231
R2         0.009284 -0.001142 -0.036023 -0.044969
RMSE       6.671584  6.706749  6.822834  7.058939

=== GE | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.045902   4.237860   4.238666   4.252219
Pearson r   0.134240   0.150633   0.130078   0.104110
QLIKE       0.583395   0.573050   0.608299   0.616018
R2         -0.000819  -0.012368  -0.003321  -0.008271
RMSE       27.437418  27.593684  27.472130  27.540542

=== BAC | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.274453  2.588921  2.369231  2.616549
Pearson r  0.250177 -0.087623 -0.121580 -0.090434
QLIKE      0.453586  0.470565  0.464555  0.564879
R2         0.050949 -0.019164 -0.024120 -0.068346
RMSE       7.562345  7.840424  7.859838  8.026430

=== C | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.830797  2.892440  2.880604  2.900656
Pearson r  0.261140 -0.021245 -0.012972 -0.024523
QLIKE      0.375795  0.448389  0.441054  0.441041
R2         0.018287 -0.060239 -0.049897 -0.045534
RMSE       9.352386  9.758562  9.722207  9.705828

=== BTCUSDT | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         8.138601   5.697305   6.721080  10.311754
Pearson r   0.021307  -0.026282  -0.084843  -0.045376
QLIKE       0.850299   0.522103   0.651675   0.996957
R2         -0.874690  -0.213368  -0.380963  -2.817171
RMSE       12.577338  10.117711  10.795166  17.944543

=== EURUSD | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.094914  0.106529  0.118788  0.129708
Pearson r  0.574423  0.464119  0.409632  0.371018
QLIKE      0.388933  0.514366  0.576258  0.545863
R2         0.308988  0.200027  0.082086 -0.003169
RMSE       0.177595  0.189280  0.201792  0.209722

=== GOLD | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.386764  0.438366  0.450723  0.461202
Pearson r  0.318338  0.030626  0.035624 -0.031809
QLIKE      0.940854  0.700051  0.621370  0.643210
R2        -0.037377 -0.216076 -0.262363 -0.301049
RMSE       0.667450  0.723987  0.741785  0.758686

=== SP500 | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.622896  0.812487  0.888630  0.734901
Pearson r  0.610704  0.189209  0.112712  0.088610
QLIKE      0.822947  1.149605  1.308650  1.073933
R2         0.328867  0.035232  0.002256  0.002960
RMSE       2.319784  2.781491  2.828818  2.827988
Out[824]:
{'mode': 'text',
 'path': '/Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/results/metrics_multiple_inputs_all.txt',
 'sections': 9}

Results on models with more tasks¶

In [ ]:
import os
import pickle
import numpy as np


# --- Load the structured data dictionary produced by the data-processing step ---
load_data_object_8_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_8.pkl")
with open(load_data_object_8_file_path, "rb") as f:
    structured_data_dict_8 = pickle.load(f)
print("Data dictionary 8 loaded successfully.")

# Tickers and horizons for the multi-task experiment. Both are defined locally
# so the cell survives a fresh-kernel "Restart & Run All" — previously the call
# below referenced `final_tickers` / `final_horizons`, which are only defined
# in a different cell. The values match the logged runs (H=1, 5, 10, 20).
tickers  = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
horizons = [1, 5, 10, 20]

# Per-ticker input/target arrays, validated for presence and rank before use.
X_price_map, X_time_map, y_map = {}, {}, {}

missing = []
for t in tickers:
    if t not in structured_data_dict_8:
        missing.append(t)
        continue

    entry = structured_data_dict_8[t]
    Xp = entry.get("X_other", None)   # price/other features — assumed (N, T, F); TODO confirm upstream
    Xt = entry.get("X_time",  None)   # calendar/time features (optional)
    Y  = entry.get("y",       None)   # targets — assumed (N, T, no_tasks); TODO confirm upstream

    if Xp is None or Y is None:
        print(f"[WARN] {t}: missing X_other or y → skipping.")
        continue

    Xp = np.asarray(Xp, dtype=float)
    Xt = None if Xt is None else np.asarray(Xt, dtype=float)
    Y  = np.asarray(Y,  dtype=float)

    # Inputs and targets must be rank-3; a bad time array is dropped rather
    # than fatal so price-only training can still proceed.
    if Xp.ndim != 3 or Y.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_other={Xp.shape}, y={Y.shape} → skipping.")
        continue
    if Xt is not None and Xt.ndim != 3:
        print(f"[WARN] {t}: unexpected dims X_time={Xt.shape} → setting to None.")
        Xt = None

    X_price_map[t] = Xp
    X_time_map[t]  = Xt
    y_map[t]       = Y

if missing:
    print(f"[INFO] Missing tickers in data dict (skipped): {missing}")

# Shape report so the run log records exactly what goes into training.
for t in tickers:
    Xp = X_price_map.get(t); Xt = X_time_map.get(t); Y = y_map.get(t)
    print(t, "X_other", None if Xp is None else Xp.shape,
             "X_time",  None if Xt is None else Xt.shape,
             "y",       None if Y is None else Y.shape)


multiple_tasks_results_store = {}
multiple_tasks_models = ["Simple_KAN"]

# Models that are trained in the multi-task setting (one head per task).
MULTI_TASK_MODELS = {"Simple_KAN"}


MULTI_TASK_BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "multi_task_results_saved_object")
os.makedirs(MULTI_TASK_BASE_SAVE_DIR, exist_ok=True)

_ = run_all_models_for_all(
    tickers=tickers,            # was final_tickers (defined only in another cell)
    horizons=horizons,          # was final_horizons (defined only in another cell)
    model_list=multiple_tasks_models,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=MULTI_TASK_BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS_MERGED,   # merged per-model params from the config cell — TODO confirm
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=None,
    results_store=multiple_tasks_results_store,
    overwrite=False
)
Data dictionary 8 loaded successfully.
AAPL X_other (2018, 60, 1) X_time (2018, 60, 14) y (2018, 60, 6)
MSFT X_other (2018, 60, 1) X_time (2018, 60, 14) y (2018, 60, 6)
GE X_other (2018, 60, 1) X_time (2018, 60, 14) y (2018, 60, 6)
BAC X_other (2018, 60, 1) X_time (2018, 60, 14) y (2018, 60, 6)
C X_other (2018, 60, 1) X_time (2018, 60, 14) y (2018, 60, 6)
BTCUSDT X_other (2399, 60, 1) X_time (2399, 60, 14) y (2399, 60, 6)
EURUSD X_other (3782, 60, 1) X_time (3782, 60, 14) y (3782, 60, 6)
GOLD X_other (5534, 60, 1) X_time (5534, 60, 14) y (5534, 60, 6)
SP500 X_other (3466, 60, 1) X_time (3466, 60, 14) y (3466, 60, 6)
[skip] AAPL | H=1 | Simple_KAN (exists, overwrite=False)
[skip] AAPL | H=5 | Simple_KAN (exists, overwrite=False)
[skip] AAPL | H=10 | Simple_KAN (exists, overwrite=False)
[skip] AAPL | H=20 | Simple_KAN (exists, overwrite=False)
[skip] MSFT | H=1 | Simple_KAN (exists, overwrite=False)
[skip] MSFT | H=5 | Simple_KAN (exists, overwrite=False)
[skip] MSFT | H=10 | Simple_KAN (exists, overwrite=False)
[skip] MSFT | H=20 | Simple_KAN (exists, overwrite=False)
[skip] GE | H=1 | Simple_KAN (exists, overwrite=False)
[skip] GE | H=5 | Simple_KAN (exists, overwrite=False)
[skip] GE | H=10 | Simple_KAN (exists, overwrite=False)
[skip] GE | H=20 | Simple_KAN (exists, overwrite=False)
[skip] BAC | H=1 | Simple_KAN (exists, overwrite=False)
[skip] BAC | H=5 | Simple_KAN (exists, overwrite=False)
[skip] BAC | H=10 | Simple_KAN (exists, overwrite=False)
[skip] BAC | H=20 | Simple_KAN (exists, overwrite=False)
[skip] C | H=1 | Simple_KAN (exists, overwrite=False)
[skip] C | H=5 | Simple_KAN (exists, overwrite=False)
[skip] C | H=10 | Simple_KAN (exists, overwrite=False)
[skip] C | H=20 | Simple_KAN (exists, overwrite=False)
[skip] BTCUSDT | H=1 | Simple_KAN (exists, overwrite=False)
[skip] BTCUSDT | H=5 | Simple_KAN (exists, overwrite=False)
[skip] BTCUSDT | H=10 | Simple_KAN (exists, overwrite=False)
[skip] BTCUSDT | H=20 | Simple_KAN (exists, overwrite=False)
[skip] EURUSD | H=1 | Simple_KAN (exists, overwrite=False)
[skip] EURUSD | H=5 | Simple_KAN (exists, overwrite=False)
[skip] EURUSD | H=10 | Simple_KAN (exists, overwrite=False)
[skip] EURUSD | H=20 | Simple_KAN (exists, overwrite=False)
[skip] GOLD | H=1 | Simple_KAN (exists, overwrite=False)
[skip] GOLD | H=5 | Simple_KAN (exists, overwrite=False)
[skip] GOLD | H=10 | Simple_KAN (exists, overwrite=False)
[skip] GOLD | H=20 | Simple_KAN (exists, overwrite=False)
[skip] SP500 | H=1 | Simple_KAN (exists, overwrite=False)
[skip] SP500 | H=5 | Simple_KAN (exists, overwrite=False)
[skip] SP500 | H=10 | Simple_KAN (exists, overwrite=False)
[skip] SP500 | H=20 | Simple_KAN (exists, overwrite=False)
In [827]:
# Summarise the multi-task results (Task 0, full-horizon slice) and export
# them as CSV + plain text under results/.
os.makedirs("results", exist_ok=True)

frames = build_metric_frames(
    multiple_tasks_results_store,
    task_name="Task 0",
    outer_horizon="full",
    pretty_print=True,
)

for out_path in ("results/multiple_task_all.csv", "results/multiple_task_all.txt"):
    export_metrics(multiple_tasks_results_store, out_path, outer_horizon="full")

print("Saved metrics to results/multiple_task_all.csv and results/multiple_task_all.txt")
=== AAPL | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.233370  2.420229  2.496394  2.542620
Pearson r  0.403259  0.374102  0.226162  0.176836
QLIKE      0.339369  0.390355  0.456250  0.483922
R2         0.049072  0.032749  0.005055 -0.029823
RMSE       8.427859  8.791616  8.953487  9.128504

=== MSFT | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.025080  2.072445  2.086677  2.150716
Pearson r  0.054634 -0.008634 -0.032227 -0.021689
QLIKE      0.479356  0.485605  0.488221  0.494321
R2        -0.017841 -0.022692 -0.025370 -0.023969
RMSE       6.762301  6.778548  6.787668  6.987651

=== GE | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.109229   4.179038   4.323730   4.390263
Pearson r   0.005856  -0.003281  -0.009208  -0.019724
QLIKE       0.693059   0.717717   0.716372   0.750153
R2         -0.011797  -0.014988  -0.010958  -0.017834
RMSE       27.587478  27.629370  27.576487  27.670843

=== BAC | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.246216  2.245612  2.253850  2.858156
Pearson r  0.365015  0.087234  0.025997  0.051217
QLIKE      0.337393  0.417836  0.424769  0.467983
R2         0.123384 -0.010612 -0.015985 -0.483034
RMSE       7.268024  7.807460  7.828558  9.456755

=== C | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.761594  2.970135  3.076174  2.866582
Pearson r  0.432482  0.282608  0.161675  0.039654
QLIKE      0.312212  0.378857  0.418921  0.430557
R2         0.175541  0.063556  0.000168 -0.038355
RMSE       8.570666  9.171173  9.487568  9.672447

=== BTCUSDT | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        4.085041  4.623494  4.735971  4.865638
Pearson r  0.336386  0.168642  0.109898  0.013987
QLIKE      0.257825  0.372967  0.381542  0.398666
R2         0.017161 -0.024781 -0.027522 -0.057018
RMSE       9.106789  9.298258  9.311806  9.442844

=== EURUSD | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.089256  0.098608  0.098084  0.101612
Pearson r  0.589095  0.457400  0.432414  0.355138
QLIKE      0.379050  0.490405  0.474720  0.525571
R2         0.342939  0.173361  0.141422  0.070494
RMSE       0.173178  0.192408  0.195161  0.201875

=== GOLD | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.320758  0.386195  0.404782  0.441989
Pearson r  0.482068  0.369484  0.358787  0.226124
QLIKE      0.505835  0.645387  0.548921  0.630642
R2         0.190177 -0.004309 -0.196087 -0.234453
RMSE       0.589719  0.657936  0.722050  0.739013

=== SP500 | Simple_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.512058  0.633248  0.661464  0.749131
Pearson r  0.562249  0.371859  0.278503  0.200656
QLIKE      0.639160  0.666840  0.760797  0.807649
R2         0.246671  0.089974  0.053271 -0.116840
RMSE       2.457739  2.701426  2.755551  2.993070
Saved metrics to results/multiple_task_all.csv and results/multiple_task_all.txt

Results on hierarchical models¶

In [835]:
import os
import pickle
import numpy as np

# Reload the structured data dictionary for the hierarchical-model experiment.
load_data_object_8_file_path = os.path.join(root_folder, objects_relative_path, "structured_data_dict_8.pkl")
with open(load_data_object_8_file_path, "rb") as f:
    structured_data_dict_8 = pickle.load(f)
print("Data dictionary 8 loaded successfully.")

final_tickers  = ["AAPL", "MSFT", "GE", "BAC", "C", "BTCUSDT", "EURUSD", "GOLD", "SP500"]
final_horizons = [1, 5, 10, 20]

# Collect validated per-ticker arrays; entries must be rank-3, and a malformed
# time array is dropped (set to None) rather than aborting the ticker.
X_price_map, X_time_map, y_map = {}, {}, {}
missing = []
for ticker in final_tickers:
    if ticker not in structured_data_dict_8:
        missing.append(ticker)
        continue

    entry = structured_data_dict_8[ticker]
    Xp = entry.get("X_other")
    Xt = entry.get("X_time")
    Y = entry.get("y")

    if Xp is None or Y is None:
        print(f"[WARN] {ticker}: missing X_other or y → skipping.")
        continue

    Xp = np.asarray(Xp, dtype=float)
    Xt = None if Xt is None else np.asarray(Xt, dtype=float)
    Y = np.asarray(Y, dtype=float)

    if Xp.ndim != 3 or Y.ndim != 3:
        print(f"[WARN] {ticker}: unexpected dims X_other={Xp.shape}, y={Y.shape} → skipping.")
        continue
    if Xt is not None and Xt.ndim != 3:
        print(f"[WARN] {ticker}: unexpected dims X_time={Xt.shape} → setting to None.")
        Xt = None

    X_price_map[ticker] = Xp
    X_time_map[ticker] = Xt
    y_map[ticker] = Y

if missing:
    print(f"[INFO] Missing tickers in data dict (skipped): {missing}")

# Fixed hyper-parameters per model family (no grid search in this run).
FIXED_PARAMS = {
    "Custom_KAN": dict(
        lr=5e-4, dropout=0.0, l2_weight=1e-5, batch_size=128, hidden_layers=3, hidden_dim=128, knots=8, spline_power=5,
        epochs=50, patience=10, min_epochs=30, min_delta=1e-4
    ),
    "Custom_KAN_LSTM": dict(
        lr=5e-4, dropout=0.0, l2_weight=1e-5, batch_size=32, hidden_layers=2, hidden_dim=32, knots=8, spline_power=5,
        epochs=20, patience=5, min_epochs=10, min_delta=1e-4
    )
}

# Per-model input plumbing: whether price/time streams are merged and whether
# the (samples, steps, features) windows are flattened before the model.
MODEL_IO = {
    "Custom_KAN":   dict(merge_price_time=False, flatten=True),
    "Custom_KAN_LSTM": dict(merge_price_time=False, flatten=False),
}

# Options shared by every model/ticker/horizon combination.
COMMON = dict(
    use_nested_cv=False,
    single_holdout=False,
    normalize_X=True,
    normalize_Time=True,
    normalize_y=True,
    verbose=True,
    target_mode="log_mse"
)

hierarchical_results_store = {}
hierarchical_tasks_models = ["Custom_KAN", "Custom_KAN_LSTM"]
MULTI_TASK_MODELS = {"Custom_KAN", "Custom_KAN_LSTM"}

HIERARCHICAL_BASE_SAVE_DIR = os.path.join(root_folder, objects_relative_path, "hierarchical_results_saved_object")
os.makedirs(HIERARCHICAL_BASE_SAVE_DIR, exist_ok=True)

_ = run_all_models_for_all(
    tickers=final_tickers,
    horizons=final_horizons,
    model_list=hierarchical_tasks_models,
    X_price_map=X_price_map,
    X_time_map=X_time_map,
    y_map=y_map,
    base_save_dir=HIERARCHICAL_BASE_SAVE_DIR,
    fixed_params=FIXED_PARAMS,
    common_params=COMMON,
    model_io=MODEL_IO,
    multitask_models=MULTI_TASK_MODELS,
    param_grids=None,
    results_store=hierarchical_results_store,
    overwrite=False
)
Data dictionary 8 loaded successfully.

=== AAPL | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.133266415235102
  Min value:  -13.098170041087558
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9069843280619727
  Min value:  -6.555005878598166
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8950185841626324
  Min value:  -13.098170041087558
Epoch 001 | phase=1 | train_loss=5.2656 | val_main=0.580879
Epoch 002 | phase=1 | train_loss=4.7336 | val_main=0.580655
Epoch 003 | phase=1 | train_loss=5.0280 | val_main=0.580830
Epoch 004 | phase=1 | train_loss=5.0805 | val_main=0.580853
Epoch 005 | phase=1 | train_loss=4.7396 | val_main=0.580849
Epoch 006 | phase=1 | train_loss=4.7586 | val_main=0.580791
Epoch 007 | phase=1 | train_loss=5.0139 | val_main=0.580762
Epoch 008 | phase=1 | train_loss=4.9333 | val_main=0.580841
Epoch 009 | phase=1 | train_loss=4.9417 | val_main=0.580780
Epoch 010 | phase=1 | train_loss=4.8842 | val_main=0.580725
Epoch 011 | phase=1 | train_loss=4.8221 | val_main=0.580772
Epoch 012 | phase=1 | train_loss=4.8437 | val_main=0.580800
Epoch 013 | phase=1 | train_loss=4.8300 | val_main=0.580828
Epoch 014 | phase=1 | train_loss=4.9623 | val_main=0.580748
Epoch 015 | phase=1 | train_loss=4.7319 | val_main=0.580731
Epoch 016 | phase=0 | train_loss=5.9581 | val_main=0.555534
Epoch 017 | phase=0 | train_loss=5.6949 | val_main=0.557962
Epoch 018 | phase=0 | train_loss=5.8127 | val_main=0.546465
Epoch 019 | phase=0 | train_loss=5.7872 | val_main=0.542669
Epoch 020 | phase=0 | train_loss=5.7358 | val_main=0.552767
Epoch 021 | phase=0 | train_loss=5.6837 | val_main=0.542813
Epoch 022 | phase=0 | train_loss=5.6503 | val_main=0.416276
Epoch 023 | phase=0 | train_loss=5.6826 | val_main=0.420905
Epoch 024 | phase=0 | train_loss=5.4319 | val_main=0.601478
Epoch 025 | phase=0 | train_loss=5.6861 | val_main=0.352899
Epoch 026 | phase=0 | train_loss=5.2834 | val_main=0.360113
Epoch 027 | phase=0 | train_loss=5.1426 | val_main=0.344435
Epoch 028 | phase=0 | train_loss=5.3386 | val_main=0.341884
Epoch 029 | phase=0 | train_loss=5.2108 | val_main=0.354185
Epoch 030 | phase=0 | train_loss=5.2402 | val_main=0.355077
Epoch 031 | phase=2 | train_loss=0.6062 | val_main=0.353629
Epoch 032 | phase=2 | train_loss=0.6129 | val_main=0.360563
Epoch 033 | phase=2 | train_loss=0.6202 | val_main=0.356180
Epoch 034 | phase=2 | train_loss=0.5954 | val_main=0.358796
Epoch 035 | phase=2 | train_loss=0.6951 | val_main=0.355777
Epoch 036 | phase=2 | train_loss=0.5897 | val_main=0.358530
Epoch 037 | phase=2 | train_loss=0.5819 | val_main=0.360066
Epoch 038 | phase=2 | train_loss=0.5915 | val_main=0.357502
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.31861463
1 day(s) RMSE                      : 8.52916063
1 day(s) R2                        : 0.02607494
1 day(s) Pearson r                 : 0.37040281
1 day(s) QLIKE                     : 0.36833499
full horizon MAE                   : 2.31861463
full horizon RMSE                  : 8.52916063
full horizon R2                    : 0.02607494
full horizon Pearson r             : 0.37040281
full horizon QLIKE                 : 0.36833499

--- Task 2 ---
1 day(s) MAE                       : 0.08133260
1 day(s) RMSE                      : 0.10965309
1 day(s) R2                        : -0.64432686
1 day(s) Pearson r                 : 0.41169904
1 day(s) QLIKE                     : 3.74021587
full horizon MAE                   : 0.08133260
full horizon RMSE                  : 0.10965309
full horizon R2                    : -0.64432686
full horizon Pearson r             : 0.41169904
full horizon QLIKE                 : 3.74021587

--- Task 3 ---
1 day(s) MAE                       : 0.51494433
1 day(s) RMSE                      : 0.53802927
1 day(s) R2                        : -0.36626247
1 day(s) Pearson r                 : -0.13818377
1 day(s) QLIKE                     : 15.52352989
full horizon MAE                   : 0.51494433
full horizon RMSE                  : 0.53802927
full horizon R2                    : -0.36626247
full horizon Pearson r             : -0.13818377
full horizon QLIKE                 : 15.52352989

--- Task 4 ---
1 day(s) MAE                       : 1.03912987
1 day(s) RMSE                      : 1.27053290
1 day(s) R2                        : -1.27672097
1 day(s) Pearson r                 : 0.13806596
1 day(s) QLIKE                     : 0.99488755
full horizon MAE                   : 1.03912987
full horizon RMSE                  : 1.27053290
full horizon R2                    : -1.27672097
full horizon Pearson r             : 0.13806596
full horizon QLIKE                 : 0.99488755

--- Task 5 ---
1 day(s) MAE                       : 0.10461350
1 day(s) RMSE                      : 0.11908139
1 day(s) R2                        : -4.97197511
1 day(s) Pearson r                 : -0.24486432
1 day(s) QLIKE                     : 0.80372846
full horizon MAE                   : 0.10461350
full horizon RMSE                  : 0.11908139
full horizon R2                    : -4.97197511
full horizon Pearson r             : -0.24486432
full horizon QLIKE                 : 0.80372846

--- Task 6 ---
1 day(s) MAE                       : 1.77869615
1 day(s) RMSE                      : 2.26988993
1 day(s) R2                        : -1.36228680
1 day(s) Pearson r                 : -0.49594072
1 day(s) QLIKE                     : 0.08016863
full horizon MAE                   : 1.77869615
full horizon RMSE                  : 2.26988993
full horizon R2                    : -1.36228680
full horizon Pearson r             : -0.49594072
full horizon QLIKE                 : 0.08016863

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_H1.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.13205, max=7.21495

=== AAPL | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.133266415235102
  Min value:  -13.098170041087558
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9069843280619727
  Min value:  -6.555005878598166
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8950185841626324
  Min value:  -13.098170041087558
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
ERROR:tensorflow:==================================
Object was never used (type <class 'tensorflow.python.ops.tensor_array_ops.TensorArray'>):
<tensorflow.python.ops.tensor_array_ops.TensorArray object at 0x7174e4f640>
If you want to mark it as used call its "mark_used()" method.
It was originally created here:
  File "/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/keras/src/backend/tensorflow/rnn.py", line 418, in <genexpr>
    output_ta_t = tuple(  File "/opt/anaconda3/envs/NN_env/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py", line 288, in wrapped
    return _add_should_use_warning(fn(*args, **kwargs),
==================================
46/46 ━━━━━━━━━━━━━━━━━━━━ 246s 423ms/step - loss: 1.0050 - val_loss: 0.6297 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 7s 162ms/step - loss: 0.9933 - val_loss: 0.6360 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 183ms/step - loss: 0.9932 - val_loss: 0.6397 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 187ms/step - loss: 0.9931 - val_loss: 0.6526 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 196ms/step - loss: 0.9929 - val_loss: 0.6482 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 184ms/step - loss: 1.0096
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 190ms/step - loss: 0.9928 - val_loss: 0.6476 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.43932468
1 day(s) RMSE                      : 8.72179704
1 day(s) R2                        : -0.01841527
1 day(s) Pearson r                 : 0.45143803
1 day(s) QLIKE                     : 0.53538042
full horizon MAE                   : 2.43932468
full horizon RMSE                  : 8.72179704
full horizon R2                    : -0.01841527
full horizon Pearson r             : 0.45143803
full horizon QLIKE                 : 0.53538042

--- Task 2 ---
1 day(s) MAE                       : 0.08153062
1 day(s) RMSE                      : 0.11017861
1 day(s) R2                        : -0.66012580
1 day(s) Pearson r                 : -0.10636611
1 day(s) QLIKE                     : 3.72285469
full horizon MAE                   : 0.08153062
full horizon RMSE                  : 0.11017861
full horizon R2                    : -0.66012580
full horizon Pearson r             : -0.10636611
full horizon QLIKE                 : 3.72285469

--- Task 3 ---
1 day(s) MAE                       : 0.49865958
1 day(s) RMSE                      : 0.52397619
1 day(s) R2                        : -0.29582228
1 day(s) Pearson r                 : -0.03726016
1 day(s) QLIKE                     : 15.47962049
full horizon MAE                   : 0.49865958
full horizon RMSE                  : 0.52397619
full horizon R2                    : -0.29582228
full horizon Pearson r             : -0.03726016
full horizon QLIKE                 : 15.47962049

--- Task 4 ---
1 day(s) MAE                       : 1.03795360
1 day(s) RMSE                      : 1.26664632
1 day(s) R2                        : -1.26281321
1 day(s) Pearson r                 : 0.11562097
1 day(s) QLIKE                     : 0.99635614
full horizon MAE                   : 1.03795360
full horizon RMSE                  : 1.26664632
full horizon R2                    : -1.26281321
full horizon Pearson r             : 0.11562097
full horizon QLIKE                 : 0.99635614

--- Task 5 ---
1 day(s) MAE                       : 0.04714031
1 day(s) RMSE                      : 0.06211790
1 day(s) R2                        : -0.62503863
1 day(s) Pearson r                 : 0.31877883
1 day(s) QLIKE                     : 0.11160550
full horizon MAE                   : 0.04714031
full horizon RMSE                  : 0.06211790
full horizon R2                    : -0.62503863
full horizon Pearson r             : 0.31877883
full horizon QLIKE                 : 0.11160550

--- Task 6 ---
1 day(s) MAE                       : 1.44468791
1 day(s) RMSE                      : 1.58885216
1 day(s) R2                        : -0.15741739
1 day(s) Pearson r                 : 0.28092223
1 day(s) QLIKE                     : 0.03597071
full horizon MAE                   : 1.44468791
full horizon RMSE                  : 1.58885216
full horizon R2                    : -0.15741739
full horizon Pearson r             : 0.28092223
full horizon QLIKE                 : 0.03597071

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=2.4167, max=2.41709

=== AAPL | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.140231129276028
  Min value:  -13.09819653554311
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9079382526637185
  Min value:  -6.554957295267729
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8969263469922275
  Min value:  -13.09819653554311
Epoch 001 | phase=1 | train_loss=5.0681 | val_main=0.587713
Epoch 002 | phase=1 | train_loss=4.7434 | val_main=0.587708
Epoch 003 | phase=1 | train_loss=4.9557 | val_main=0.587690
Epoch 004 | phase=1 | train_loss=4.9394 | val_main=0.587686
Epoch 005 | phase=1 | train_loss=4.7655 | val_main=0.587713
Epoch 006 | phase=1 | train_loss=4.8220 | val_main=0.587699
Epoch 007 | phase=1 | train_loss=4.9456 | val_main=0.587671
Epoch 008 | phase=1 | train_loss=4.8953 | val_main=0.587693
Epoch 009 | phase=1 | train_loss=4.9145 | val_main=0.587682
Epoch 010 | phase=1 | train_loss=4.9372 | val_main=0.587678
Epoch 011 | phase=1 | train_loss=4.8423 | val_main=0.587683
Epoch 012 | phase=1 | train_loss=4.8731 | val_main=0.587686
Epoch 013 | phase=1 | train_loss=4.8636 | val_main=0.587682
Epoch 014 | phase=1 | train_loss=4.8959 | val_main=0.587677
Epoch 015 | phase=1 | train_loss=4.7466 | val_main=0.587672
Epoch 016 | phase=0 | train_loss=5.9270 | val_main=0.577843
Epoch 017 | phase=0 | train_loss=5.7934 | val_main=0.564152
Epoch 018 | phase=0 | train_loss=5.9877 | val_main=0.560024
Epoch 019 | phase=0 | train_loss=5.8449 | val_main=0.554524
Epoch 020 | phase=0 | train_loss=5.9626 | val_main=0.556333
Epoch 021 | phase=0 | train_loss=5.8091 | val_main=0.559991
Epoch 022 | phase=0 | train_loss=5.9202 | val_main=0.559138
Epoch 023 | phase=0 | train_loss=5.9448 | val_main=0.567318
Epoch 024 | phase=0 | train_loss=5.8409 | val_main=0.561319
Epoch 025 | phase=0 | train_loss=6.1025 | val_main=0.554772
Epoch 026 | phase=0 | train_loss=5.8045 | val_main=0.540072
Epoch 027 | phase=0 | train_loss=5.6988 | val_main=0.427341
Epoch 028 | phase=0 | train_loss=5.7054 | val_main=0.379700
Epoch 029 | phase=0 | train_loss=5.4793 | val_main=0.406403
Epoch 030 | phase=0 | train_loss=5.4662 | val_main=0.359878
Epoch 031 | phase=2 | train_loss=0.6987 | val_main=0.455149
Epoch 032 | phase=2 | train_loss=0.6905 | val_main=0.402307
Epoch 033 | phase=2 | train_loss=0.6873 | val_main=0.379673
Epoch 034 | phase=2 | train_loss=0.6694 | val_main=0.386497
Epoch 035 | phase=2 | train_loss=0.6901 | val_main=0.393787
Epoch 036 | phase=2 | train_loss=0.6550 | val_main=0.389367
Epoch 037 | phase=2 | train_loss=0.6402 | val_main=0.378044
Epoch 038 | phase=2 | train_loss=0.6421 | val_main=0.380437
Epoch 039 | phase=2 | train_loss=0.6494 | val_main=0.373024
Epoch 040 | phase=2 | train_loss=0.6337 | val_main=0.385306
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.35718910
1 day(s) RMSE                      : 8.58201427
1 day(s) R2                        : 0.01396708
1 day(s) Pearson r                 : 0.35270661
1 day(s) QLIKE                     : 0.38347309
3 day(s) MAE                       : 2.45247231
3 day(s) RMSE                      : 8.85642024
3 day(s) R2                        : 0.00748488
3 day(s) Pearson r                 : 0.33469079
3 day(s) QLIKE                     : 0.41221190
5 day(s) MAE                       : 2.48726231
5 day(s) RMSE                      : 8.95131615
5 day(s) R2                        : -0.00271077
5 day(s) Pearson r                 : 0.28444708
5 day(s) QLIKE                     : 0.43209580
full horizon MAE                   : 2.48726231
full horizon RMSE                  : 8.95131615
full horizon R2                    : -0.00271077
full horizon Pearson r             : 0.28444708
full horizon QLIKE                 : 0.43209580

--- Task 2 ---
1 day(s) MAE                       : 0.08377364
1 day(s) RMSE                      : 0.11465373
1 day(s) R2                        : -0.79772301
1 day(s) Pearson r                 : 0.35229515
1 day(s) QLIKE                     : 3.63412772
3 day(s) MAE                       : 0.08425763
3 day(s) RMSE                      : 0.11505583
3 day(s) R2                        : -0.79499929
3 day(s) Pearson r                 : 0.32727704
3 day(s) QLIKE                     : 3.64135878
5 day(s) MAE                       : 0.08483972
5 day(s) RMSE                      : 0.11564764
5 day(s) R2                        : -0.80022688
5 day(s) Pearson r                 : 0.32006032
5 day(s) QLIKE                     : 3.64643012
full horizon MAE                   : 0.08483972
full horizon RMSE                  : 0.11564764
full horizon R2                    : -0.80022688
full horizon Pearson r             : 0.32006032
full horizon QLIKE                 : 3.64643012

--- Task 3 ---
1 day(s) MAE                       : 0.50863885
1 day(s) RMSE                      : 0.53213426
1 day(s) R2                        : -0.33648711
1 day(s) Pearson r                 : -0.07152109
1 day(s) QLIKE                     : 15.49084688
3 day(s) MAE                       : 0.50688446
3 day(s) RMSE                      : 0.53118217
3 day(s) R2                        : -0.33646181
3 day(s) Pearson r                 : -0.06080745
3 day(s) QLIKE                     : 15.48750370
5 day(s) MAE                       : 0.50485207
5 day(s) RMSE                      : 0.52965804
5 day(s) R2                        : -0.33325075
5 day(s) Pearson r                 : -0.04799882
5 day(s) QLIKE                     : 15.48456858
full horizon MAE                   : 0.50485207
full horizon RMSE                  : 0.52965804
full horizon R2                    : -0.33325075
full horizon Pearson r             : -0.04799882
full horizon QLIKE                 : 15.48456858

--- Task 4 ---
1 day(s) MAE                       : 1.03968581
1 day(s) RMSE                      : 1.27088613
1 day(s) R2                        : -1.27798706
1 day(s) Pearson r                 : -0.07987644
1 day(s) QLIKE                     : 0.99644317
3 day(s) MAE                       : 1.04113320
3 day(s) RMSE                      : 1.27136733
3 day(s) R2                        : -1.28873740
3 day(s) Pearson r                 : -0.13006035
3 day(s) QLIKE                     : 0.98935306
5 day(s) MAE                       : 1.04243624
5 day(s) RMSE                      : 1.27168341
5 day(s) R2                        : -1.29889327
5 day(s) Pearson r                 : -0.11772813
5 day(s) QLIKE                     : 0.98209272
full horizon MAE                   : 1.04243624
full horizon RMSE                  : 1.27168341
full horizon R2                    : -1.29889327
full horizon Pearson r             : -0.11772813
full horizon QLIKE                 : 0.98209272

--- Task 5 ---
1 day(s) MAE                       : 0.04882928
1 day(s) RMSE                      : 0.06607644
1 day(s) R2                        : -0.83875318
1 day(s) Pearson r                 : -0.33530271
1 day(s) QLIKE                     : 0.26964598
3 day(s) MAE                       : 0.04970202
3 day(s) RMSE                      : 0.06708103
3 day(s) R2                        : -0.86007125
3 day(s) Pearson r                 : -0.33462253
3 day(s) QLIKE                     : 0.27946477
5 day(s) MAE                       : 0.04990478
5 day(s) RMSE                      : 0.06761364
5 day(s) R2                        : -0.85308747
5 day(s) Pearson r                 : -0.33641994
5 day(s) QLIKE                     : 0.27976206
full horizon MAE                   : 0.04990478
full horizon RMSE                  : 0.06761364
full horizon R2                    : -0.85308747
full horizon Pearson r             : -0.33641994
full horizon QLIKE                 : 0.27976206

--- Task 6 ---
1 day(s) MAE                       : 1.60954344
1 day(s) RMSE                      : 2.03313770
1 day(s) R2                        : -0.89520683
1 day(s) Pearson r                 : -0.48810658
1 day(s) QLIKE                     : 0.06504519
3 day(s) MAE                       : 1.60392788
3 day(s) RMSE                      : 2.01860167
3 day(s) R2                        : -0.86044913
3 day(s) Pearson r                 : -0.48654089
3 day(s) QLIKE                     : 0.06441922
5 day(s) MAE                       : 1.60759207
5 day(s) RMSE                      : 2.02175228
5 day(s) R2                        : -0.85664750
5 day(s) Pearson r                 : -0.48681333
5 day(s) QLIKE                     : 0.06476662
full horizon MAE                   : 1.60759207
full horizon RMSE                  : 2.02175228
full horizon R2                    : -0.85664750
full horizon Pearson r             : -0.48681333
full horizon QLIKE                 : 0.06476662

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_H5.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.16991, max=6.75209

=== AAPL | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.140231129276028
  Min value:  -13.09819653554311
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.9079382526637185
  Min value:  -6.554957295267729
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8969263469922275
  Min value:  -13.09819653554311
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 139s 428ms/step - loss: 0.8677 - val_loss: 0.7315 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 213ms/step - loss: 0.6348 - val_loss: 1.0547 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 240ms/step - loss: 0.5550 - val_loss: 1.0444 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 241ms/step - loss: 0.5191 - val_loss: 1.0782 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 243ms/step - loss: 0.5003 - val_loss: 1.0121 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 238ms/step - loss: 0.4637
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 246ms/step - loss: 0.4904 - val_loss: 0.9914 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.49347025
1 day(s) RMSE                      : 8.69984223
1 day(s) R2                        : -0.01329455
1 day(s) Pearson r                 : 0.44610270
1 day(s) QLIKE                     : 0.53532791
3 day(s) MAE                       : 2.67081843
3 day(s) RMSE                      : 8.93140018
3 day(s) R2                        : -0.00939185
3 day(s) Pearson r                 : 0.00759024
3 day(s) QLIKE                     : 0.55392763
5 day(s) MAE                       : 2.73369511
5 day(s) RMSE                      : 8.97461124
5 day(s) R2                        : -0.00793652
5 day(s) Pearson r                 : 0.00651749
5 day(s) QLIKE                     : 0.55788240
full horizon MAE                   : 2.73369511
full horizon RMSE                  : 8.97461124
full horizon R2                    : -0.00793652
full horizon Pearson r             : 0.00651749
full horizon QLIKE                 : 0.55788240

--- Task 2 ---
1 day(s) MAE                       : 0.08382950
1 day(s) RMSE                      : 0.11500732
1 day(s) R2                        : -0.80882856
1 day(s) Pearson r                 : -0.29905991
1 day(s) QLIKE                     : 3.72320856
3 day(s) MAE                       : 0.08777396
3 day(s) RMSE                      : 0.12071934
3 day(s) R2                        : -0.97606291
3 day(s) Pearson r                 : -0.00658043
3 day(s) QLIKE                     : 4.87053765
5 day(s) MAE                       : 0.08993878
5 day(s) RMSE                      : 0.12337385
5 day(s) R2                        : -1.04880161
5 day(s) Pearson r                 : -0.00928899
5 day(s) QLIKE                     : 7.08719452
full horizon MAE                   : 0.08993878
full horizon RMSE                  : 0.12337385
full horizon R2                    : -1.04880161
full horizon Pearson r             : -0.00928899
full horizon QLIKE                 : 7.08719452

--- Task 3 ---
1 day(s) MAE                       : 0.49542020
1 day(s) RMSE                      : 0.51788626
1 day(s) R2                        : -0.26587586
1 day(s) Pearson r                 : 0.01053806
1 day(s) QLIKE                     : 15.47961952
3 day(s) MAE                       : 0.48754887
3 day(s) RMSE                      : 0.50511198
3 day(s) R2                        : -0.20849514
3 day(s) Pearson r                 : 0.00152261
3 day(s) QLIKE                     : 15.47977937
5 day(s) MAE                       : 0.47867428
5 day(s) RMSE                      : 0.49219322
5 day(s) R2                        : -0.15130913
5 day(s) Pearson r                 : 0.00253218
5 day(s) QLIKE                     : 15.48147450
full horizon MAE                   : 0.47867428
full horizon RMSE                  : 0.49219322
full horizon R2                    : -0.15130913
full horizon Pearson r             : 0.00253218
full horizon QLIKE                 : 15.48147450

--- Task 4 ---
1 day(s) MAE                       : 1.03494832
1 day(s) RMSE                      : 1.25937489
1 day(s) R2                        : -1.23690755
1 day(s) Pearson r                 : 0.12137409
1 day(s) QLIKE                     : 0.99632345
3 day(s) MAE                       : 1.03354099
3 day(s) RMSE                      : 1.25333381
3 day(s) R2                        : -1.22426938
3 day(s) Pearson r                 : 0.00396884
3 day(s) QLIKE                     : 0.98967993
5 day(s) MAE                       : 1.03151866
5 day(s) RMSE                      : 1.24645823
5 day(s) R2                        : -1.20859588
5 day(s) Pearson r                 : 0.00731001
5 day(s) QLIKE                     : 0.98386279
full horizon MAE                   : 1.03151866
full horizon RMSE                  : 1.24645823
full horizon R2                    : -1.20859588
full horizon Pearson r             : 0.00731001
full horizon QLIKE                 : 0.98386279

--- Task 5 ---
1 day(s) MAE                       : 0.06571096
1 day(s) RMSE                      : 0.08005515
1 day(s) R2                        : -1.69903686
1 day(s) Pearson r                 : -0.26101939
1 day(s) QLIKE                     : 0.11163239
3 day(s) MAE                       : 0.05077131
3 day(s) RMSE                      : 0.06558723
3 day(s) R2                        : -0.77815127
3 day(s) Pearson r                 : 0.00636304
3 day(s) QLIKE                     : 0.17253106
5 day(s) MAE                       : 0.05333587
5 day(s) RMSE                      : 0.06646957
5 day(s) R2                        : -0.79090735
5 day(s) Pearson r                 : 0.01093631
5 day(s) QLIKE                     : 0.22711865
full horizon MAE                   : 0.05333587
full horizon RMSE                  : 0.06646957
full horizon R2                    : -0.79090735
full horizon Pearson r             : 0.01093631
full horizon QLIKE                 : 0.22711865

--- Task 6 ---
1 day(s) MAE                       : 1.54517484
1 day(s) RMSE                      : 1.69235306
1 day(s) R2                        : -0.31312167
1 day(s) Pearson r                 : 0.19128402
1 day(s) QLIKE                     : 0.03596833
3 day(s) MAE                       : 2.80135990
3 day(s) RMSE                      : 3.36577693
3 day(s) R2                        : -4.17234163
3 day(s) Pearson r                 : -0.00535594
3 day(s) QLIKE                     : 0.05917191
5 day(s) MAE                       : 6.08816989
5 day(s) RMSE                      : 7.56399271
5 day(s) R2                        : -24.98817477
5 day(s) Pearson r                 : -0.00884888
5 day(s) QLIKE                     : 0.12094343
full horizon MAE                   : 6.08816989
full horizon RMSE                  : 7.56399271
full horizon R2                    : -24.98817477
full horizon Pearson r             : -0.00884888
full horizon QLIKE                 : 0.12094343

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=2.59225, max=3.08685

=== AAPL | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.149858145997081
  Min value:  -13.0982259671673
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.909556794039411
  Min value:  -6.554914737131346
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8998157347365185
  Min value:  -13.0982259671673
Epoch 001 | phase=1 | train_loss=4.9976 | val_main=0.591480
Epoch 002 | phase=1 | train_loss=4.7587 | val_main=0.591577
Epoch 003 | phase=1 | train_loss=4.9780 | val_main=0.591626
Epoch 004 | phase=1 | train_loss=4.9096 | val_main=0.591647
Epoch 005 | phase=1 | train_loss=4.7706 | val_main=0.591579
Epoch 006 | phase=1 | train_loss=4.8389 | val_main=0.591620
Epoch 007 | phase=1 | train_loss=4.9549 | val_main=0.591703
Epoch 008 | phase=1 | train_loss=4.9579 | val_main=0.591605
Epoch 009 | phase=1 | train_loss=4.8700 | val_main=0.591637
Epoch 010 | phase=1 | train_loss=4.9039 | val_main=0.591662
Epoch 011 | phase=1 | train_loss=4.8354 | val_main=0.591630
Epoch 012 | phase=1 | train_loss=4.8875 | val_main=0.591619
Epoch 013 | phase=1 | train_loss=4.8638 | val_main=0.591618
Epoch 014 | phase=1 | train_loss=4.8710 | val_main=0.591653
Epoch 015 | phase=1 | train_loss=4.7845 | val_main=0.591678
Epoch 016 | phase=0 | train_loss=5.9830 | val_main=0.587268
Epoch 017 | phase=0 | train_loss=5.7894 | val_main=0.573910
Epoch 018 | phase=0 | train_loss=6.0005 | val_main=0.569595
Epoch 019 | phase=0 | train_loss=5.8008 | val_main=0.566631
Epoch 020 | phase=0 | train_loss=5.8804 | val_main=0.562590
Epoch 021 | phase=0 | train_loss=5.7765 | val_main=0.565525
Epoch 022 | phase=0 | train_loss=5.8871 | val_main=0.569739
Epoch 023 | phase=0 | train_loss=5.9654 | val_main=0.572219
Epoch 024 | phase=0 | train_loss=5.8246 | val_main=0.566105
Epoch 025 | phase=0 | train_loss=6.0971 | val_main=0.554751
Epoch 026 | phase=0 | train_loss=5.7697 | val_main=0.474335
Epoch 027 | phase=0 | train_loss=5.5786 | val_main=0.435445
Epoch 028 | phase=0 | train_loss=5.7261 | val_main=0.488873
Epoch 029 | phase=0 | train_loss=5.4016 | val_main=0.386680
Epoch 030 | phase=0 | train_loss=5.4291 | val_main=0.377969
Epoch 031 | phase=2 | train_loss=0.7440 | val_main=0.466510
Epoch 032 | phase=2 | train_loss=0.7338 | val_main=0.387323
Epoch 033 | phase=2 | train_loss=0.7269 | val_main=0.414941
Epoch 034 | phase=2 | train_loss=0.7204 | val_main=0.400250
Epoch 035 | phase=2 | train_loss=0.7203 | val_main=0.413027
Epoch 036 | phase=2 | train_loss=0.6948 | val_main=0.408802
Epoch 037 | phase=2 | train_loss=0.6910 | val_main=0.402995
Epoch 038 | phase=2 | train_loss=0.6966 | val_main=0.428999
Epoch 039 | phase=2 | train_loss=0.6971 | val_main=0.395387
Epoch 040 | phase=2 | train_loss=0.6861 | val_main=0.419196
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.40900681
1 day(s) RMSE                      : 8.63152780
1 day(s) R2                        : 0.00255651
1 day(s) Pearson r                 : 0.32745879
1 day(s) QLIKE                     : 0.38314056
3 day(s) MAE                       : 2.49867375
3 day(s) RMSE                      : 8.90470268
3 day(s) R2                        : -0.00336638
3 day(s) Pearson r                 : 0.31269487
3 day(s) QLIKE                     : 0.41044636
5 day(s) MAE                       : 2.52819879
5 day(s) RMSE                      : 8.98871572
5 day(s) R2                        : -0.01110715
5 day(s) Pearson r                 : 0.27386522
5 day(s) QLIKE                     : 0.42945206
10 day(s) MAE                      : 2.55504159
10 day(s) RMSE                     : 9.07959438
10 day(s) R2                       : -0.02316958
10 day(s) Pearson r                : 0.20831028
10 day(s) QLIKE                    : 0.45959905
full horizon MAE                   : 2.55504159
full horizon RMSE                  : 9.07959438
full horizon R2                    : -0.02316958
full horizon Pearson r             : 0.20831028
full horizon QLIKE                 : 0.45959905

--- Task 2 ---
1 day(s) MAE                       : 0.08521499
1 day(s) RMSE                      : 0.11652767
1 day(s) R2                        : -0.85696862
1 day(s) Pearson r                 : 0.30556452
1 day(s) QLIKE                     : 3.67605449
3 day(s) MAE                       : 0.08583480
3 day(s) RMSE                      : 0.11706553
3 day(s) R2                        : -0.85825421
3 day(s) Pearson r                 : 0.31768770
3 day(s) QLIKE                     : 3.66534425
5 day(s) MAE                       : 0.08629705
5 day(s) RMSE                      : 0.11759035
5 day(s) R2                        : -0.86121703
5 day(s) Pearson r                 : 0.29577166
5 day(s) QLIKE                     : 3.68036688
10 day(s) MAE                      : 0.08782582
10 day(s) RMSE                     : 0.11915583
10 day(s) R2                       : -0.88511290
10 day(s) Pearson r                : 0.25950830
10 day(s) QLIKE                    : 3.68846604
full horizon MAE                   : 0.08782582
full horizon RMSE                  : 0.11915583
full horizon R2                    : -0.88511290
full horizon Pearson r             : 0.25950830
full horizon QLIKE                 : 3.68846604

--- Task 3 ---
1 day(s) MAE                       : 0.50374023
1 day(s) RMSE                      : 0.52150717
1 day(s) R2                        : -0.28363901
1 day(s) Pearson r                 : -0.09998450
1 day(s) QLIKE                     : 15.50696589
3 day(s) MAE                       : 0.50102731
3 day(s) RMSE                      : 0.51913352
3 day(s) R2                        : -0.27652026
3 day(s) Pearson r                 : -0.08777129
3 day(s) QLIKE                     : 15.50341190
5 day(s) MAE                       : 0.49863147
5 day(s) RMSE                      : 0.51725957
5 day(s) R2                        : -0.27156264
5 day(s) Pearson r                 : -0.07428393
5 day(s) QLIKE                     : 15.49911166
10 day(s) MAE                      : 0.49353786
10 day(s) RMSE                     : 0.51380140
10 day(s) R2                       : -0.26527584
10 day(s) Pearson r                : -0.03121923
10 day(s) QLIKE                    : 15.48973787
full horizon MAE                   : 0.49353786
full horizon RMSE                  : 0.51380140
full horizon R2                    : -0.26527584
full horizon Pearson r             : -0.03121923
full horizon QLIKE                 : 15.48973787

--- Task 4 ---
1 day(s) MAE                       : 1.03995553
1 day(s) RMSE                      : 1.27213193
1 day(s) R2                        : -1.28245531
1 day(s) Pearson r                 : 0.11294152
1 day(s) QLIKE                     : 0.99555048
3 day(s) MAE                       : 1.04131688
3 day(s) RMSE                      : 1.27237728
3 day(s) R2                        : -1.29237514
3 day(s) Pearson r                 : 0.09532335
3 day(s) QLIKE                     : 0.98852189
5 day(s) MAE                       : 1.04269513
5 day(s) RMSE                      : 1.27285107
5 day(s) R2                        : -1.30311688
5 day(s) Pearson r                 : 0.08503811
5 day(s) QLIKE                     : 0.98132393
10 day(s) MAE                      : 1.04635897
10 day(s) RMSE                     : 1.27408447
10 day(s) R2                       : -1.33108317
10 day(s) Pearson r                : 0.03997526
10 day(s) QLIKE                    : 0.96329488
full horizon MAE                   : 1.04635897
full horizon RMSE                  : 1.27408447
full horizon R2                    : -1.33108317
full horizon Pearson r             : 0.03997526
full horizon QLIKE                 : 0.96329488

--- Task 5 ---
1 day(s) MAE                       : 0.16272801
1 day(s) RMSE                      : 0.18271942
1 day(s) R2                        : -13.06046345
1 day(s) Pearson r                 : -0.32473915
1 day(s) QLIKE                     : 0.74381645
3 day(s) MAE                       : 0.17412920
3 day(s) RMSE                      : 0.19670403
3 day(s) R2                        : -14.99398438
3 day(s) Pearson r                 : -0.32506647
3 day(s) QLIKE                     : 0.78296344
5 day(s) MAE                       : 0.17100334
5 day(s) RMSE                      : 0.19316717
5 day(s) R2                        : -14.12495101
5 day(s) Pearson r                 : -0.32854207
5 day(s) QLIKE                     : 0.76533237
10 day(s) MAE                      : 0.16499043
10 day(s) RMSE                     : 0.18668914
10 day(s) R2                       : -12.63357832
10 day(s) Pearson r                : -0.33241370
10 day(s) QLIKE                    : 0.75333162
full horizon MAE                   : 0.16499043
full horizon RMSE                  : 0.18668914
full horizon R2                    : -12.63357832
full horizon Pearson r             : -0.33241370
full horizon QLIKE                 : 0.75333162

--- Task 6 ---
1 day(s) MAE                       : 1.78076867
1 day(s) RMSE                      : 2.30784480
1 day(s) R2                        : -1.44194696
1 day(s) Pearson r                 : -0.47615094
1 day(s) QLIKE                     : 0.08039461
3 day(s) MAE                       : 1.74572315
3 day(s) RMSE                      : 2.26754810
3 day(s) R2                        : -1.34762952
3 day(s) Pearson r                 : -0.47408607
3 day(s) QLIKE                     : 0.07833172
5 day(s) MAE                       : 1.73604630
5 day(s) RMSE                      : 2.25666366
5 day(s) R2                        : -1.31316838
5 day(s) Pearson r                 : -0.47385418
5 day(s) QLIKE                     : 0.07804482
10 day(s) MAE                      : 1.72141617
10 day(s) RMSE                     : 2.23843532
10 day(s) R2                       : -1.24645367
10 day(s) Pearson r                : -0.47446097
10 day(s) QLIKE                    : 0.07799410
full horizon MAE                   : 1.72141617
full horizon RMSE                  : 2.23843532
full horizon R2                    : -1.24645367
full horizon Pearson r             : -0.47446097
full horizon QLIKE                 : 0.07799410

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_H10.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.02408, max=5.72146

=== AAPL | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.149858145997081
  Min value:  -13.0982259671673
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.909556794039411
  Min value:  -6.554914737131346
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.8998157347365185
  Min value:  -13.0982259671673
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 114s 509ms/step - loss: 0.8379 - val_loss: 0.6560 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 297ms/step - loss: 0.5647 - val_loss: 0.9022 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 322ms/step - loss: 0.4796 - val_loss: 1.0637 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 337ms/step - loss: 0.4385 - val_loss: 0.7686 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 309ms/step - loss: 0.4208 - val_loss: 0.7959 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 308ms/step - loss: 0.4102 - val_loss: 0.5208 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 324ms/step - loss: 0.4053 - val_loss: 0.6875 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 311ms/step - loss: 0.3939 - val_loss: 0.5701 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 308ms/step - loss: 0.3847 - val_loss: 0.6253 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 310ms/step - loss: 0.3815 - val_loss: 0.6930 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 300ms/step - loss: 0.3510
Epoch 11: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 310ms/step - loss: 0.3742 - val_loss: 0.5463 - learning_rate: 5.0000e-04
Epoch 11: early stopping
Restoring model weights from the end of the best epoch: 6.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.46397343
1 day(s) RMSE                      : 8.71101124
1 day(s) R2                        : -0.01589799
1 day(s) Pearson r                 : 0.29350679
1 day(s) QLIKE                     : 0.53538775
3 day(s) MAE                       : 2.61535391
3 day(s) RMSE                      : 8.96426726
3 day(s) R2                        : -0.01683454
3 day(s) Pearson r                 : -0.00028160
3 day(s) QLIKE                     : 0.56559023
5 day(s) MAE                       : 2.58468178
5 day(s) RMSE                      : 9.04217206
5 day(s) R2                        : -0.02316912
5 day(s) Pearson r                 : -0.00196901
5 day(s) QLIKE                     : 0.57105434
10 day(s) MAE                      : 2.57626262
10 day(s) RMSE                     : 9.15704222
10 day(s) R2                       : -0.04069906
10 day(s) Pearson r                : -0.00350184
10 day(s) QLIKE                    : 0.58748597
full horizon MAE                   : 2.57626262
full horizon RMSE                  : 9.15704222
full horizon R2                    : -0.04069906
full horizon Pearson r             : -0.00350184
full horizon QLIKE                 : 0.58748597

--- Task 2 ---
1 day(s) MAE                       : 0.08223410
1 day(s) RMSE                      : 0.11168223
1 day(s) R2                        : -0.70574682
1 day(s) Pearson r                 : -0.27185307
1 day(s) QLIKE                     : 3.72287583
3 day(s) MAE                       : 0.08727535
3 day(s) RMSE                      : 0.11969820
3 day(s) R2                        : -0.94277399
3 day(s) Pearson r                 : -0.00584482
3 day(s) QLIKE                     : 5.63875843
5 day(s) MAE                       : 0.08964029
5 day(s) RMSE                      : 0.12277614
5 day(s) R2                        : -1.02899785
5 day(s) Pearson r                 : -0.00858837
5 day(s) QLIKE                     : 12.77932080
10 day(s) MAE                      : 0.09237686
10 day(s) RMSE                     : 0.12599738
10 day(s) R2                       : -1.10780207
10 day(s) Pearson r                : -0.01263680
10 day(s) QLIKE                    : 9.86217665
full horizon MAE                   : 0.09237686
full horizon RMSE                  : 0.12599738
full horizon R2                    : -1.10780207
full horizon Pearson r             : -0.01263680
full horizon QLIKE                 : 9.86217665

--- Task 3 ---
1 day(s) MAE                       : 0.50411234
1 day(s) RMSE                      : 0.53397198
1 day(s) R2                        : -0.34573414
1 day(s) Pearson r                 : -0.04932665
1 day(s) QLIKE                     : 15.47962049
3 day(s) MAE                       : 0.49878045
3 day(s) RMSE                      : 0.52543630
3 day(s) R2                        : -0.30770478
3 day(s) Pearson r                 : 0.00147403
3 day(s) QLIKE                     : 15.47887560
5 day(s) MAE                       : 0.47256735
5 day(s) RMSE                      : 0.50549898
5 day(s) R2                        : -0.21439861
5 day(s) Pearson r                 : 0.00226544
5 day(s) QLIKE                     : 15.57739185
10 day(s) MAE                      : 0.41810996
10 day(s) RMSE                     : 0.54148835
10 day(s) R2                       : -0.40531244
10 day(s) Pearson r                : 0.00470844
10 day(s) QLIKE                    : 18.44718140
full horizon MAE                   : 0.41810996
full horizon RMSE                  : 0.54148835
full horizon R2                    : -0.40531244
full horizon Pearson r             : 0.00470844
full horizon QLIKE                 : 18.44718140

--- Task 4 ---
1 day(s) MAE                       : 1.03791365
1 day(s) RMSE                      : 1.26655455
1 day(s) R2                        : -1.26248532
1 day(s) Pearson r                 : 0.13472507
1 day(s) QLIKE                     : 0.99634811
3 day(s) MAE                       : 1.02461998
3 day(s) RMSE                      : 1.23299913
3 day(s) R2                        : -1.15267971
3 day(s) Pearson r                 : 0.00204501
3 day(s) QLIKE                     : 1.00477906
5 day(s) MAE                       : 0.99911457
5 day(s) RMSE                      : 1.17838795
5 day(s) R2                        : -0.97395560
5 day(s) Pearson r                 : 0.00330661
5 day(s) QLIKE                     : 1.04700206
10 day(s) MAE                      : 0.97355989
10 day(s) RMSE                     : 1.12419399
10 day(s) R2                       : -0.81486300
10 day(s) Pearson r                : 0.00679542
10 day(s) QLIKE                    : 1.03573813
full horizon MAE                   : 0.97355989
full horizon RMSE                  : 1.12419399
full horizon R2                    : -0.81486300
full horizon Pearson r             : 0.00679542
full horizon QLIKE                 : 1.03573813

--- Task 5 ---
1 day(s) MAE                       : 0.05919999
1 day(s) RMSE                      : 0.07398360
1 day(s) R2                        : -1.30516046
1 day(s) Pearson r                 : 0.29619951
1 day(s) QLIKE                     : 0.11160756
3 day(s) MAE                       : 0.06395891
3 day(s) RMSE                      : 0.07878769
3 day(s) R2                        : -1.56594265
3 day(s) Pearson r                 : -0.00608440
3 day(s) QLIKE                     : 0.11997811
5 day(s) MAE                       : 0.07028057
5 day(s) RMSE                      : 0.08521099
5 day(s) R2                        : -1.94319210
5 day(s) Pearson r                 : -0.01064519
5 day(s) QLIKE                     : 0.15374097
10 day(s) MAE                      : 0.07997829
10 day(s) RMSE                     : 0.09472834
10 day(s) R2                       : -2.51019797
10 day(s) Pearson r                : -0.01753278
10 day(s) QLIKE                    : 0.19053128
full horizon MAE                   : 0.07997829
full horizon RMSE                  : 0.09472834
full horizon R2                    : -2.51019797
full horizon Pearson r             : -0.01753278
full horizon QLIKE                 : 0.19053128

--- Task 6 ---
1 day(s) MAE                       : 1.42452141
1 day(s) RMSE                      : 1.57135496
1 day(s) R2                        : -0.13206568
1 day(s) Pearson r                 : 0.29007043
1 day(s) QLIKE                     : 0.03596429
3 day(s) MAE                       : 1.27956033
3 day(s) RMSE                      : 1.51695858
3 day(s) R2                        : -0.05066433
3 day(s) Pearson r                 : 0.00581808
3 day(s) QLIKE                     : 0.03764986
5 day(s) MAE                       : 1.22242794
5 day(s) RMSE                      : 1.56132889
5 day(s) R2                        : -0.10729253
5 day(s) Pearson r                 : 0.00904879
5 day(s) QLIKE                     : 0.04048249
10 day(s) MAE                      : 1.28327009
10 day(s) RMSE                     : 1.73329532
10 day(s) R2                       : -0.34695627
10 day(s) Pearson r                : 0.01687602
10 day(s) QLIKE                    : 0.04496652
full horizon MAE                   : 1.28327009
full horizon RMSE                  : 1.73329532
full horizon R2                    : -0.34695627
full horizon Pearson r             : 0.01687602
full horizon QLIKE                 : 0.04496652

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.5093, max=3.16441

=== AAPL | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.174045403839706
  Min value:  -13.098372439983104
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.913020640489958
  Min value:  -6.554949262074746
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.906568131251448
  Min value:  -13.098372439983104
Epoch 001 | phase=1 | train_loss=4.9979 | val_main=0.591742
Epoch 002 | phase=1 | train_loss=4.7912 | val_main=0.591874
Epoch 003 | phase=1 | train_loss=4.9514 | val_main=0.591954
Epoch 004 | phase=1 | train_loss=4.9156 | val_main=0.591972
Epoch 005 | phase=1 | train_loss=4.7861 | val_main=0.591836
Epoch 006 | phase=1 | train_loss=4.8890 | val_main=0.591915
Epoch 007 | phase=1 | train_loss=4.8696 | val_main=0.591993
Epoch 008 | phase=1 | train_loss=4.9981 | val_main=0.591872
Epoch 009 | phase=1 | train_loss=4.8941 | val_main=0.591918
Epoch 010 | phase=1 | train_loss=4.8847 | val_main=0.591972
Epoch 011 | phase=1 | train_loss=4.8457 | val_main=0.591894
Epoch 012 | phase=1 | train_loss=4.8882 | val_main=0.591876
Epoch 013 | phase=1 | train_loss=4.8547 | val_main=0.591914
Epoch 014 | phase=1 | train_loss=4.8544 | val_main=0.591955
Epoch 015 | phase=1 | train_loss=4.7870 | val_main=0.591989
Epoch 016 | phase=0 | train_loss=6.0241 | val_main=0.589061
Epoch 017 | phase=0 | train_loss=5.8466 | val_main=0.577694
Epoch 018 | phase=0 | train_loss=5.9788 | val_main=0.575886
Epoch 019 | phase=0 | train_loss=5.8163 | val_main=0.573202
Epoch 020 | phase=0 | train_loss=5.8210 | val_main=0.566247
Epoch 021 | phase=0 | train_loss=5.8500 | val_main=0.569827
Epoch 022 | phase=0 | train_loss=5.9337 | val_main=0.575103
Epoch 023 | phase=0 | train_loss=5.9664 | val_main=0.574382
Epoch 024 | phase=0 | train_loss=5.8163 | val_main=0.572494
Epoch 025 | phase=0 | train_loss=6.0288 | val_main=0.569530
Epoch 026 | phase=0 | train_loss=5.8468 | val_main=0.552994
Epoch 027 | phase=0 | train_loss=5.7339 | val_main=0.450604
Epoch 028 | phase=0 | train_loss=5.8713 | val_main=0.469513
Epoch 029 | phase=0 | train_loss=5.5493 | val_main=0.491613
Epoch 030 | phase=0 | train_loss=5.5614 | val_main=0.383899
Epoch 031 | phase=2 | train_loss=0.7786 | val_main=0.449028
Epoch 032 | phase=2 | train_loss=0.7797 | val_main=0.463928
Epoch 033 | phase=2 | train_loss=0.7785 | val_main=0.427523
Epoch 034 | phase=2 | train_loss=0.7704 | val_main=0.404972
Epoch 035 | phase=2 | train_loss=0.7650 | val_main=0.434853
Epoch 036 | phase=2 | train_loss=0.7481 | val_main=0.450797
Epoch 037 | phase=2 | train_loss=0.7463 | val_main=0.415057
Epoch 038 | phase=2 | train_loss=0.7459 | val_main=0.438108
Epoch 039 | phase=2 | train_loss=0.7503 | val_main=0.435642
Epoch 040 | phase=2 | train_loss=0.7380 | val_main=0.432412
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.34890330
1 day(s) RMSE                      : 8.54669265
1 day(s) R2                        : 0.02206695
1 day(s) Pearson r                 : 0.33516735
1 day(s) QLIKE                     : 0.38060120
3 day(s) MAE                       : 2.45140157
3 day(s) RMSE                      : 8.82670105
3 day(s) R2                        : 0.01413481
3 day(s) Pearson r                 : 0.31800991
3 day(s) QLIKE                     : 0.40802753
5 day(s) MAE                       : 2.48510319
5 day(s) RMSE                      : 8.91597622
5 day(s) R2                        : 0.00519103
5 day(s) Pearson r                 : 0.27990843
5 day(s) QLIKE                     : 0.42716715
10 day(s) MAE                      : 2.51665646
10 day(s) RMSE                     : 9.01025918
10 day(s) R2                       : -0.00760263
10 day(s) Pearson r                : 0.22270058
10 day(s) QLIKE                    : 0.45354420
20 day(s) MAE                      : 2.54592700
20 day(s) RMSE                     : 9.06581028
20 day(s) R2                       : -0.01572627
20 day(s) Pearson r                : 0.18654300
20 day(s) QLIKE                    : 0.47167433
full horizon MAE                   : 2.54592700
full horizon RMSE                  : 9.06581028
full horizon R2                    : -0.01572627
full horizon Pearson r             : 0.18654300
full horizon QLIKE                 : 0.47167433

--- Task 2 ---
1 day(s) MAE                       : 0.08309117
1 day(s) RMSE                      : 0.11358503
1 day(s) R2                        : -0.76436573
1 day(s) Pearson r                 : 0.33496882
1 day(s) QLIKE                     : 3.63579061
3 day(s) MAE                       : 0.08372854
3 day(s) RMSE                      : 0.11419420
3 day(s) R2                        : -0.76821527
3 day(s) Pearson r                 : 0.33494978
3 day(s) QLIKE                     : 3.63352689
5 day(s) MAE                       : 0.08439980
5 day(s) RMSE                      : 0.11487603
5 day(s) R2                        : -0.77628429
5 day(s) Pearson r                 : 0.33357040
5 day(s) QLIKE                     : 3.63291191
10 day(s) MAE                      : 0.08601066
10 day(s) RMSE                     : 0.11652892
10 day(s) R2                       : -0.80291080
10 day(s) Pearson r                : 0.31236896
10 day(s) QLIKE                    : 3.63771561
20 day(s) MAE                      : 0.08909036
20 day(s) RMSE                     : 0.11939357
20 day(s) R2                       : -0.85660469
20 day(s) Pearson r                : 0.27040859
20 day(s) QLIKE                    : 3.61036604
full horizon MAE                   : 0.08909036
full horizon RMSE                  : 0.11939357
full horizon R2                    : -0.85660469
full horizon Pearson r             : 0.27040859
full horizon QLIKE                 : 3.61036604

--- Task 3 ---
1 day(s) MAE                       : 0.51082842
1 day(s) RMSE                      : 0.53352974
1 day(s) R2                        : -0.34350599
1 day(s) Pearson r                 : -0.09068248
1 day(s) QLIKE                     : 15.49655782
3 day(s) MAE                       : 0.50816043
3 day(s) RMSE                      : 0.53107573
3 day(s) R2                        : -0.33592624
3 day(s) Pearson r                 : -0.08008046
3 day(s) QLIKE                     : 15.49329782
5 day(s) MAE                       : 0.50578417
5 day(s) RMSE                      : 0.52909023
5 day(s) R2                        : -0.33039371
5 day(s) Pearson r                 : -0.06559234
5 day(s) QLIKE                     : 15.48993906
10 day(s) MAE                      : 0.50167971
10 day(s) RMSE                     : 0.52649312
10 day(s) R2                       : -0.32855657
10 day(s) Pearson r                : -0.02469918
10 day(s) QLIKE                    : 15.48248281
20 day(s) MAE                      : 0.49409997
20 day(s) RMSE                     : 0.52204398
20 day(s) R2                       : -0.32847608
20 day(s) Pearson r                : 0.04618538
20 day(s) QLIKE                    : 15.46933326
full horizon MAE                   : 0.49409997
full horizon RMSE                  : 0.52204398
full horizon R2                    : -0.32847608
full horizon Pearson r             : 0.04618538
full horizon QLIKE                 : 15.46933326

--- Task 4 ---
1 day(s) MAE                       : 1.03963986
1 day(s) RMSE                      : 1.27125416
1 day(s) R2                        : -1.27930660
1 day(s) Pearson r                 : 0.10507892
1 day(s) QLIKE                     : 0.99573043
3 day(s) MAE                       : 1.04100208
3 day(s) RMSE                      : 1.27152411
3 day(s) R2                        : -1.28930192
3 day(s) Pearson r                 : 0.09040070
3 day(s) QLIKE                     : 0.98866650
5 day(s) MAE                       : 1.04234142
5 day(s) RMSE                      : 1.27194612
5 day(s) R2                        : -1.29984320
5 day(s) Pearson r                 : 0.08229582
5 day(s) QLIKE                     : 0.98141668
10 day(s) MAE                      : 1.04598383
10 day(s) RMSE                     : 1.27316285
10 day(s) R2                       : -1.32771197
10 day(s) Pearson r                : 0.04029546
10 day(s) QLIKE                    : 0.96333336
20 day(s) MAE                      : 1.05365794
20 day(s) RMSE                     : 1.27623325
20 day(s) R2                       : -1.38804969
20 day(s) Pearson r                : -0.01364550
20 day(s) QLIKE                    : 0.92844019
full horizon MAE                   : 1.05365794
full horizon RMSE                  : 1.27623325
full horizon R2                    : -1.38804969
full horizon Pearson r             : -0.01364550
full horizon QLIKE                 : 0.92844019

--- Task 5 ---
1 day(s) MAE                       : 0.04980700
1 day(s) RMSE                      : 0.06672287
1 day(s) R2                        : -0.87490658
1 day(s) Pearson r                 : -0.33058753
1 day(s) QLIKE                     : 0.27001455
3 day(s) MAE                       : 0.04998157
3 day(s) RMSE                      : 0.06717294
3 day(s) R2                        : -0.86517205
3 day(s) Pearson r                 : -0.33365717
3 day(s) QLIKE                     : 0.27319780
5 day(s) MAE                       : 0.04966397
5 day(s) RMSE                      : 0.06720294
5 day(s) R2                        : -0.83064417
5 day(s) Pearson r                 : -0.33253131
5 day(s) QLIKE                     : 0.26225426
10 day(s) MAE                      : 0.05015189
10 day(s) RMSE                     : 0.06796766
10 day(s) R2                       : -0.80707560
10 day(s) Pearson r                : -0.33422610
10 day(s) QLIKE                    : 0.25819816
20 day(s) MAE                      : 0.05169919
20 day(s) RMSE                     : 0.06904645
20 day(s) R2                       : -0.78376980
20 day(s) Pearson r                : -0.32317876
20 day(s) QLIKE                    : 0.23751577
full horizon MAE                   : 0.05169919
full horizon RMSE                  : 0.06904645
full horizon R2                    : -0.78376980
full horizon Pearson r             : -0.32317876
full horizon QLIKE                 : 0.23751577

--- Task 6 ---
1 day(s) MAE                       : 1.68330074
1 day(s) RMSE                      : 2.14070353
1 day(s) R2                        : -1.10104850
1 day(s) Pearson r                 : -0.47932257
1 day(s) QLIKE                     : 0.07053657
3 day(s) MAE                       : 1.65432934
3 day(s) RMSE                      : 2.08836267
3 day(s) R2                        : -0.99126191
3 day(s) Pearson r                 : -0.47967895
3 day(s) QLIKE                     : 0.06800707
5 day(s) MAE                       : 1.65668251
5 day(s) RMSE                      : 2.08855352
5 day(s) R2                        : -0.98136640
5 day(s) Pearson r                 : -0.48016096
5 day(s) QLIKE                     : 0.06820685
10 day(s) MAE                      : 1.66806026
10 day(s) RMSE                     : 2.09919464
10 day(s) R2                       : -0.97566718
10 day(s) Pearson r                : -0.47899850
10 day(s) QLIKE                    : 0.06915634
20 day(s) MAE                      : 1.66807621
20 day(s) RMSE                     : 2.08973765
20 day(s) R2                       : -0.91595456
20 day(s) Pearson r                : -0.47295018
20 day(s) QLIKE                    : 0.06933301
full horizon MAE                   : 1.66807621
full horizon RMSE                  : 2.08973765
full horizon R2                    : -0.91595456
full horizon Pearson r             : -0.47295018
full horizon QLIKE                 : 0.06933301

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_H20.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.21563, max=6.07227

=== AAPL | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.099874372714204
  Min value:  -0.03434294798653995
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.174045403839706
  Min value:  -13.098372439983104
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.051920323212192546
  Min value:  -0.0338203458440787
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.913020640489958
  Min value:  -6.554949262074746
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.19306601030720755
  Min value:  -0.03421247164703902
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.906568131251448
  Min value:  -13.098372439983104
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 122s 548ms/step - loss: 0.8392 - val_loss: 0.7273 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 17s 380ms/step - loss: 0.5311 - val_loss: 0.5729 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 450ms/step - loss: 0.4380 - val_loss: 0.9895 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 22s 474ms/step - loss: 0.3960 - val_loss: 0.5673 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 450ms/step - loss: 0.3701 - val_loss: 0.9167 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 450ms/step - loss: 0.3543 - val_loss: 0.5071 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 465ms/step - loss: 0.3415 - val_loss: 0.7707 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 456ms/step - loss: 0.3370 - val_loss: 0.5400 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 460ms/step - loss: 0.3300 - val_loss: 0.7480 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 448ms/step - loss: 0.3193 - val_loss: 1.3389 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 434ms/step - loss: 0.2917
Epoch 11: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 447ms/step - loss: 0.3087 - val_loss: 0.7788 - learning_rate: 5.0000e-04
Epoch 11: early stopping
Restoring model weights from the end of the best epoch: 6.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.48920489
1 day(s) RMSE                      : 8.70133537
1 day(s) R2                        : -0.01364240
1 day(s) Pearson r                 : 0.47237885
1 day(s) QLIKE                     : 0.53532435
3 day(s) MAE                       : 2.62344394
3 day(s) RMSE                      : 8.94640594
3 day(s) R2                        : -0.01278649
3 day(s) Pearson r                 : 0.00859488
3 day(s) QLIKE                     : 0.55513676
5 day(s) MAE                       : 2.59093773
5 day(s) RMSE                      : 9.06902202
5 day(s) R2                        : -0.02925456
5 day(s) Pearson r                 : -0.00117803
5 day(s) QLIKE                     : 0.59108920
10 day(s) MAE                      : 2.66165568
10 day(s) RMSE                     : 9.22587455
10 day(s) R2                       : -0.05640346
10 day(s) Pearson r                : -0.00343953
10 day(s) QLIKE                    : 0.62629859
20 day(s) MAE                      : 2.62517297
20 day(s) RMSE                     : 9.21871665
20 day(s) R2                       : -0.05027823
20 day(s) Pearson r                : -0.00235928
20 day(s) QLIKE                    : 0.60221160
full horizon MAE                   : 2.62517297
full horizon RMSE                  : 9.21871665
full horizon R2                    : -0.05027823
full horizon Pearson r             : -0.00235928
full horizon QLIKE                 : 0.60221160

--- Task 2 ---
1 day(s) MAE                       : 0.08361652
1 day(s) RMSE                      : 0.11458045
1 day(s) R2                        : -0.79542583
1 day(s) Pearson r                 : -0.26244867
1 day(s) QLIKE                     : 3.72293760
3 day(s) MAE                       : 0.08474677
3 day(s) RMSE                      : 0.11617794
3 day(s) R2                        : -0.83018249
3 day(s) Pearson r                 : -0.00590791
3 day(s) QLIKE                     : 3.75901187
5 day(s) MAE                       : 0.08788551
5 day(s) RMSE                      : 0.12047831
5 day(s) R2                        : -0.95376067
5 day(s) Pearson r                 : -0.00994152
5 day(s) QLIKE                     : 7.20769466
10 day(s) MAE                      : 0.09149871
10 day(s) RMSE                     : 0.12488260
10 day(s) R2                       : -1.07066899
10 day(s) Pearson r                : -0.01604894
10 day(s) QLIKE                    : 12.79176248
20 day(s) MAE                      : 0.09537101
20 day(s) RMSE                     : 0.12894501
20 day(s) R2                       : -1.16554206
20 day(s) Pearson r                : -0.02363395
20 day(s) QLIKE                    : 8.86001360
full horizon MAE                   : 0.09537101
full horizon RMSE                  : 0.12894501
full horizon R2                    : -1.16554206
full horizon Pearson r             : -0.02363395
full horizon QLIKE                 : 8.86001360

--- Task 3 ---
1 day(s) MAE                       : 0.50195262
1 day(s) RMSE                      : 0.53001280
1 day(s) R2                        : -0.32585202
1 day(s) Pearson r                 : -0.05920515
1 day(s) QLIKE                     : 15.47962278
3 day(s) MAE                       : 0.48842608
3 day(s) RMSE                      : 0.50704975
3 day(s) R2                        : -0.21778527
3 day(s) Pearson r                 : 0.00086605
3 day(s) QLIKE                     : 15.48105647
5 day(s) MAE                       : 0.50588580
5 day(s) RMSE                      : 0.54159480
5 day(s) R2                        : -0.39402212
5 day(s) Pearson r                 : -0.00164752
5 day(s) QLIKE                     : 15.48651871
10 day(s) MAE                      : 0.55283640
10 day(s) RMSE                     : 0.63815808
10 day(s) R2                       : -0.95187139
10 day(s) Pearson r                : -0.00460666
10 day(s) QLIKE                    : 15.49933686
20 day(s) MAE                      : 0.59707377
20 day(s) RMSE                     : 0.71005362
20 day(s) R2                       : -1.45766029
20 day(s) Pearson r                : -0.00767625
20 day(s) QLIKE                    : 15.49312292
full horizon MAE                   : 0.59707377
full horizon RMSE                  : 0.71005362
full horizon R2                    : -1.45766029
full horizon Pearson r             : -0.00767625
full horizon QLIKE                 : 15.49312292

--- Task 4 ---
1 day(s) MAE                       : 1.02268840
1 day(s) RMSE                      : 1.22991890
1 day(s) R2                        : -1.13349154
1 day(s) Pearson r                 : -0.08464380
1 day(s) QLIKE                     : 0.99637230
3 day(s) MAE                       : 1.02969101
3 day(s) RMSE                      : 1.24410641
3 day(s) R2                        : -1.19163854
3 day(s) Pearson r                 : -0.00130991
3 day(s) QLIKE                     : 0.99136897
5 day(s) MAE                       : 1.02607872
5 day(s) RMSE                      : 1.23380864
5 day(s) R2                        : -1.16399577
5 day(s) Pearson r                 : 0.00263449
5 day(s) QLIKE                     : 0.98680586
10 day(s) MAE                      : 1.00204416
10 day(s) RMSE                     : 1.18508553
10 day(s) R2                       : -1.01679017
10 day(s) Pearson r                : 0.00585048
10 day(s) QLIKE                    : 1.02258363
20 day(s) MAE                      : 0.94257148
20 day(s) RMSE                     : 1.08094961
20 day(s) R2                       : -0.71314509
20 day(s) Pearson r                : 0.01273928
20 day(s) QLIKE                    : 1.05559586
full horizon MAE                   : 0.94257148
full horizon RMSE                  : 1.08094961
full horizon R2                    : -0.71314509
full horizon Pearson r             : 0.01273928
full horizon QLIKE                 : 1.05559586

--- Task 5 ---
1 day(s) MAE                       : 0.06608492
1 day(s) RMSE                      : 0.08040594
1 day(s) R2                        : -1.72274164
1 day(s) Pearson r                 : 0.30484880
1 day(s) QLIKE                     : 0.11160304
3 day(s) MAE                       : 0.06771440
3 day(s) RMSE                      : 0.08220846
3 day(s) R2                        : -1.79359321
3 day(s) Pearson r                 : 0.00202635
3 day(s) QLIKE                     : 0.11708741
5 day(s) MAE                       : 0.06584282
5 day(s) RMSE                      : 0.08072658
5 day(s) R2                        : -1.64155957
5 day(s) Pearson r                 : 0.00579872
5 day(s) QLIKE                     : 0.11976656
10 day(s) MAE                      : 0.08044462
10 day(s) RMSE                     : 0.09568450
10 day(s) R2                       : -2.58141733
10 day(s) Pearson r                : -0.01436070
10 day(s) QLIKE                    : 0.86469826
20 day(s) MAE                      : 0.09672599
20 day(s) RMSE                     : 0.11115728
20 day(s) R2                       : -3.62308525
20 day(s) Pearson r                : -0.02547409
20 day(s) QLIKE                    : 6.33471813
full horizon MAE                   : 0.09672599
full horizon RMSE                  : 0.11115728
full horizon R2                    : -3.62308525
full horizon Pearson r             : -0.02547409
full horizon QLIKE                 : 6.33471813

--- Task 6 ---
1 day(s) MAE                       : 1.43006278
1 day(s) RMSE                      : 1.57610697
1 day(s) R2                        : -0.13892311
1 day(s) Pearson r                 : -0.16235651
1 day(s) QLIKE                     : 0.03597401
3 day(s) MAE                       : 1.42549737
3 day(s) RMSE                      : 1.57444226
3 day(s) R2                        : -0.13180086
3 day(s) Pearson r                 : -0.02399268
3 day(s) QLIKE                     : 0.03628156
5 day(s) MAE                       : 1.34326766
5 day(s) RMSE                      : 1.54227969
5 day(s) R2                        : -0.08043801
5 day(s) Pearson r                 : 0.00676982
5 day(s) QLIKE                     : 0.03767898
10 day(s) MAE                      : 1.26659919
10 day(s) RMSE                     : 1.53701849
10 day(s) R2                       : -0.05917214
10 day(s) Pearson r                : 0.01693161
10 day(s) QLIKE                    : 0.03966460
20 day(s) MAE                      : 1.21441608
20 day(s) RMSE                     : 1.59831077
20 day(s) R2                       : -0.12078968
20 day(s) Pearson r                : 0.04117961
20 day(s) QLIKE                    : 0.04305269
full horizon MAE                   : 1.21441608
full horizon RMSE                  : 1.59831077
full horizon R2                    : -0.12078968
full horizon Pearson r             : 0.04117961
full horizon QLIKE                 : 0.04305269

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/AAPL/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.314451, max=112.408
Saved y_pred min=1.02872, max=3.04886

=== MSFT | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.892531280455467
  Min value:  -4.877434769908842
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8947596773346853
  Min value:  -6.550744378714031
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930619914036179
  Min value:  -181.24706122190173
Epoch 001 | phase=1 | train_loss=4.9498 | val_main=0.458259
Epoch 002 | phase=1 | train_loss=4.8817 | val_main=0.458262
Epoch 003 | phase=1 | train_loss=4.7267 | val_main=0.458264
Epoch 004 | phase=1 | train_loss=4.4710 | val_main=0.458279
Epoch 005 | phase=1 | train_loss=4.4481 | val_main=0.458297
Epoch 006 | phase=1 | train_loss=4.2545 | val_main=0.458305
Epoch 007 | phase=1 | train_loss=4.1628 | val_main=0.458327
Epoch 008 | phase=1 | train_loss=3.8979 | val_main=0.458321
Epoch 009 | phase=1 | train_loss=3.4693 | val_main=0.458376
Epoch 010 | phase=1 | train_loss=3.2632 | val_main=0.458380
Epoch 011 | phase=1 | train_loss=3.0020 | val_main=0.458430
Epoch 012 | phase=1 | train_loss=2.8576 | val_main=0.458452
Epoch 013 | phase=1 | train_loss=2.6502 | val_main=0.458449
Epoch 014 | phase=1 | train_loss=2.5666 | val_main=0.458481
Epoch 015 | phase=1 | train_loss=2.4990 | val_main=0.458527
Epoch 016 | phase=0 | train_loss=3.4069 | val_main=0.463098
Epoch 017 | phase=0 | train_loss=3.1957 | val_main=0.450229
Epoch 018 | phase=0 | train_loss=2.9886 | val_main=0.454575
Epoch 019 | phase=0 | train_loss=2.8270 | val_main=0.451153
Epoch 020 | phase=0 | train_loss=2.6330 | val_main=0.471209
Epoch 021 | phase=0 | train_loss=2.5705 | val_main=0.494933
Epoch 022 | phase=0 | train_loss=2.3647 | val_main=0.506408
Epoch 023 | phase=0 | train_loss=2.2821 | val_main=0.520924
Epoch 024 | phase=0 | train_loss=2.0893 | val_main=0.493835
Epoch 025 | phase=0 | train_loss=1.9584 | val_main=0.495601
Epoch 026 | phase=0 | train_loss=1.8881 | val_main=0.527587
Epoch 027 | phase=0 | train_loss=1.6580 | val_main=0.516662
Epoch 028 | phase=0 | train_loss=2.2990 | val_main=0.543893
Epoch 029 | phase=0 | train_loss=1.7506 | val_main=0.543283
Epoch 030 | phase=0 | train_loss=1.5064 | val_main=0.530209
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.06569565
1 day(s) RMSE                      : 6.74668536
1 day(s) R2                        : -0.01314609
1 day(s) Pearson r                 : 0.04933923
1 day(s) QLIKE                     : 0.48041865
full horizon MAE                   : 2.06569565
full horizon RMSE                  : 6.74668536
full horizon R2                    : -0.01314609
full horizon Pearson r             : 0.04933923
full horizon QLIKE                 : 0.48041865

--- Task 2 ---
1 day(s) MAE                       : 0.11191474
1 day(s) RMSE                      : 0.12254706
1 day(s) R2                        : -14.83918425
1 day(s) Pearson r                 : -0.34973109
1 day(s) QLIKE                     : 14.56362632
full horizon MAE                   : 0.11191474
full horizon RMSE                  : 0.12254706
full horizon R2                    : -14.83918425
full horizon Pearson r             : -0.34973109
full horizon QLIKE                 : 14.56362632

--- Task 3 ---
1 day(s) MAE                       : 0.23168203
1 day(s) RMSE                      : 0.30513781
1 day(s) R2                        : -0.05150632
1 day(s) Pearson r                 : -0.22572153
1 day(s) QLIKE                     : 1.53854846
full horizon MAE                   : 0.23168203
full horizon RMSE                  : 0.30513781
full horizon R2                    : -0.05150632
full horizon Pearson r             : -0.22572153
full horizon QLIKE                 : 1.53854846

--- Task 4 ---
1 day(s) MAE                       : 0.37990357
1 day(s) RMSE                      : 0.59628913
1 day(s) R2                        : -0.34808989
1 day(s) Pearson r                 : -0.27561779
1 day(s) QLIKE                     : 3.22918412
full horizon MAE                   : 0.37990357
full horizon RMSE                  : 0.59628913
full horizon R2                    : -0.34808989
full horizon Pearson r             : -0.27561779
full horizon QLIKE                 : 3.22918412

--- Task 5 ---
1 day(s) MAE                       : 0.08285953
1 day(s) RMSE                      : 0.11006984
1 day(s) R2                        : -8.65899036
1 day(s) Pearson r                 : 0.00029409
1 day(s) QLIKE                     : 0.46723172
full horizon MAE                   : 0.08285953
full horizon RMSE                  : 0.11006984
full horizon R2                    : -8.65899036
full horizon Pearson r             : 0.00029409
full horizon QLIKE                 : 0.46723172

--- Task 6 ---
1 day(s) MAE                       : 4.17324130
1 day(s) RMSE                      : 5.52334024
1 day(s) R2                        : -0.43603498
1 day(s) Pearson r                 : -0.13130473
1 day(s) QLIKE                     : 0.15742201
full horizon MAE                   : 4.17324130
full horizon RMSE                  : 5.52334024
full horizon R2                    : -0.43603498
full horizon Pearson r             : -0.13130473
full horizon QLIKE                 : 0.15742201

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_H1.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.8109, max=4.37427

=== MSFT | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.892531280455467
  Min value:  -4.877434769908842
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8947596773346853
  Min value:  -6.550744378714031
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930619914036179
  Min value:  -181.24706122190173
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 97s 338ms/step - loss: 1.0103 - val_loss: 0.5394 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 7s 163ms/step - loss: 0.9547 - val_loss: 0.5279 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 184ms/step - loss: 0.9124 - val_loss: 0.5376 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 184ms/step - loss: 0.9001 - val_loss: 0.5250 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 183ms/step - loss: 0.8891 - val_loss: 0.5296 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 184ms/step - loss: 0.8908 - val_loss: 0.5450 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 182ms/step - loss: 0.8852 - val_loss: 0.5576 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 183ms/step - loss: 0.8760 - val_loss: 0.5373 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 181ms/step - loss: 0.8971
Epoch 9: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 186ms/step - loss: 0.8853 - val_loss: 0.5637 - learning_rate: 5.0000e-04
Epoch 9: early stopping
Restoring model weights from the end of the best epoch: 4.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.96958293
1 day(s) RMSE                      : 6.59991965
1 day(s) R2                        : 0.03045391
1 day(s) Pearson r                 : 0.26251946
1 day(s) QLIKE                     : 0.37953376
full horizon MAE                   : 1.96958293
full horizon RMSE                  : 6.59991965
full horizon R2                    : 0.03045391
full horizon Pearson r             : 0.26251946
full horizon QLIKE                 : 0.37953376

--- Task 2 ---
1 day(s) MAE                       : 0.02096519
1 day(s) RMSE                      : 0.03189231
1 day(s) R2                        : -0.07275115
1 day(s) Pearson r                 : 0.01570202
1 day(s) QLIKE                     : 13.27161376
full horizon MAE                   : 0.02096519
full horizon RMSE                  : 0.03189231
full horizon R2                    : -0.07275115
full horizon Pearson r             : 0.01570202
full horizon QLIKE                 : 13.27161376

--- Task 3 ---
1 day(s) MAE                       : 0.21920741
1 day(s) RMSE                      : 0.30082655
1 day(s) R2                        : -0.02200304
1 day(s) Pearson r                 : -0.00739623
1 day(s) QLIKE                     : 1.53489210
full horizon MAE                   : 0.21920741
full horizon RMSE                  : 0.30082655
full horizon R2                    : -0.02200304
full horizon Pearson r             : -0.00739623
full horizon QLIKE                 : 1.53489210

--- Task 4 ---
1 day(s) MAE                       : 0.36466276
1 day(s) RMSE                      : 0.61365991
1 day(s) R2                        : -0.42777760
1 day(s) Pearson r                 : -0.07696344
1 day(s) QLIKE                     : 2.54016385
full horizon MAE                   : 0.36466276
full horizon RMSE                  : 0.61365991
full horizon R2                    : -0.42777760
full horizon Pearson r             : -0.07696344
full horizon QLIKE                 : 2.54016385

--- Task 5 ---
1 day(s) MAE                       : 0.05068668
1 day(s) RMSE                      : 0.06333863
1 day(s) R2                        : -2.19839770
1 day(s) Pearson r                 : 0.14704874
1 day(s) QLIKE                     : 0.64085866
full horizon MAE                   : 0.05068668
full horizon RMSE                  : 0.06333863
full horizon R2                    : -2.19839770
full horizon Pearson r             : 0.14704874
full horizon QLIKE                 : 0.64085866

--- Task 6 ---
1 day(s) MAE                       : 4.01395767
1 day(s) RMSE                      : 5.36760562
1 day(s) R2                        : -0.35619651
1 day(s) Pearson r                 : -0.06868085
1 day(s) QLIKE                     : 0.16851237
full horizon MAE                   : 4.01395767
full horizon RMSE                  : 5.36760562
full horizon R2                    : -0.35619651
full horizon Pearson r             : -0.06868085
full horizon QLIKE                 : 0.16851237

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.31197, max=6.00958

=== MSFT | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.894907177625173
  Min value:  -4.875740826920772
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.894348562811063
  Min value:  -6.5517309160899515
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930739994585198
  Min value:  -181.2597467460353
Epoch 001 | phase=1 | train_loss=4.9284 | val_main=0.454810
Epoch 002 | phase=1 | train_loss=4.8908 | val_main=0.454810
Epoch 003 | phase=1 | train_loss=4.7961 | val_main=0.454812
Epoch 004 | phase=1 | train_loss=4.4992 | val_main=0.454762
Epoch 005 | phase=1 | train_loss=4.4809 | val_main=0.454652
Epoch 006 | phase=1 | train_loss=4.2779 | val_main=0.454638
Epoch 007 | phase=1 | train_loss=4.1591 | val_main=0.454616
Epoch 008 | phase=1 | train_loss=3.9276 | val_main=0.454794
Epoch 009 | phase=1 | train_loss=3.5285 | val_main=0.454845
Epoch 010 | phase=1 | train_loss=3.3865 | val_main=0.454796
Epoch 011 | phase=1 | train_loss=3.1399 | val_main=0.454660
Epoch 012 | phase=1 | train_loss=3.0143 | val_main=0.454643
Epoch 013 | phase=1 | train_loss=2.7461 | val_main=0.454714
Epoch 014 | phase=1 | train_loss=2.6315 | val_main=0.454723
Epoch 015 | phase=1 | train_loss=2.5678 | val_main=0.454730
Epoch 016 | phase=0 | train_loss=3.4693 | val_main=0.454742
Epoch 017 | phase=0 | train_loss=3.3169 | val_main=0.449849
Epoch 018 | phase=0 | train_loss=3.1457 | val_main=0.448923
Epoch 019 | phase=0 | train_loss=2.9480 | val_main=0.483934
Epoch 020 | phase=0 | train_loss=2.7609 | val_main=0.500867
Epoch 021 | phase=0 | train_loss=2.7187 | val_main=0.518818
Epoch 022 | phase=0 | train_loss=2.5798 | val_main=0.527641
Epoch 023 | phase=0 | train_loss=2.5613 | val_main=0.538016
Epoch 024 | phase=0 | train_loss=2.4540 | val_main=0.524091
Epoch 025 | phase=0 | train_loss=2.3086 | val_main=0.531437
Epoch 026 | phase=0 | train_loss=2.1858 | val_main=0.563545
Epoch 027 | phase=0 | train_loss=1.9853 | val_main=0.523279
Epoch 028 | phase=0 | train_loss=1.8284 | val_main=0.540988
Epoch 029 | phase=0 | train_loss=1.7129 | val_main=0.526538
Epoch 030 | phase=0 | train_loss=1.6062 | val_main=0.543027
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.09821793
1 day(s) RMSE                      : 6.75594322
1 day(s) R2                        : -0.01592850
1 day(s) Pearson r                 : 0.06224651
1 day(s) QLIKE                     : 0.50734442
3 day(s) MAE                       : 2.13581495
3 day(s) RMSE                      : 6.77751467
3 day(s) R2                        : -0.02249485
3 day(s) Pearson r                 : 0.03421798
3 day(s) QLIKE                     : 0.51951570
5 day(s) MAE                       : 2.14613885
5 day(s) RMSE                      : 6.80004931
5 day(s) R2                        : -0.02919009
5 day(s) Pearson r                 : 0.00247596
5 day(s) QLIKE                     : 0.52834308
full horizon MAE                   : 2.14613885
full horizon RMSE                  : 6.80004931
full horizon R2                    : -0.02919009
full horizon Pearson r             : 0.00247596
full horizon QLIKE                 : 0.52834308

--- Task 2 ---
1 day(s) MAE                       : 0.10457399
1 day(s) RMSE                      : 0.11175172
1 day(s) R2                        : -12.17150679
1 day(s) Pearson r                 : -0.45163596
1 day(s) QLIKE                     : 15.58442270
3 day(s) MAE                       : 0.10722468
3 day(s) RMSE                      : 0.11544383
3 day(s) R2                        : -13.05621800
3 day(s) Pearson r                 : -0.41900243
3 day(s) QLIKE                     : 15.53356609
5 day(s) MAE                       : 0.10629010
5 day(s) RMSE                      : 0.11490932
5 day(s) R2                        : -12.92635717
5 day(s) Pearson r                 : -0.40526274
5 day(s) QLIKE                     : 15.41846557
full horizon MAE                   : 0.10629010
full horizon RMSE                  : 0.11490932
full horizon R2                    : -12.92635717
full horizon Pearson r             : -0.40526274
full horizon QLIKE                 : 15.41846557

--- Task 3 ---
1 day(s) MAE                       : 0.23542905
1 day(s) RMSE                      : 0.30712081
1 day(s) R2                        : -0.06521764
1 day(s) Pearson r                 : -0.22831725
1 day(s) QLIKE                     : 1.53943646
3 day(s) MAE                       : 0.23509040
3 day(s) RMSE                      : 0.30711437
3 day(s) R2                        : -0.06504860
3 day(s) Pearson r                 : -0.22662700
3 day(s) QLIKE                     : 1.53942477
5 day(s) MAE                       : 0.23491785
5 day(s) RMSE                      : 0.30727692
5 day(s) R2                        : -0.06601649
5 day(s) Pearson r                 : -0.23047965
5 day(s) QLIKE                     : 1.53948441
full horizon MAE                   : 0.23491785
full horizon RMSE                  : 0.30727692
full horizon R2                    : -0.06601649
full horizon Pearson r             : -0.23047965
full horizon QLIKE                 : 1.53948441

--- Task 4 ---
1 day(s) MAE                       : 0.37697990
1 day(s) RMSE                      : 0.59684003
1 day(s) R2                        : -0.35058200
1 day(s) Pearson r                 : -0.26417788
1 day(s) QLIKE                     : 3.51502793
3 day(s) MAE                       : 0.37710069
3 day(s) RMSE                      : 0.59823705
3 day(s) R2                        : -0.35693698
3 day(s) Pearson r                 : -0.27658413
3 day(s) QLIKE                     : 3.46333072
5 day(s) MAE                       : 0.37701253
5 day(s) RMSE                      : 0.59974786
5 day(s) R2                        : -0.36374391
5 day(s) Pearson r                 : -0.28392939
5 day(s) QLIKE                     : 3.41546954
full horizon MAE                   : 0.37701253
full horizon RMSE                  : 0.59974786
full horizon R2                    : -0.36374391
full horizon Pearson r             : -0.28392939
full horizon QLIKE                 : 3.41546954

--- Task 5 ---
1 day(s) MAE                       : 0.05490078
1 day(s) RMSE                      : 0.06889454
1 day(s) R2                        : -2.78411853
1 day(s) Pearson r                 : 0.03097022
1 day(s) QLIKE                     : 0.08823921
3 day(s) MAE                       : 0.05631060
3 day(s) RMSE                      : 0.07047521
3 day(s) R2                        : -2.98246655
3 day(s) Pearson r                 : 0.04490080
3 day(s) QLIKE                     : 0.08940142
5 day(s) MAE                       : 0.05646836
5 day(s) RMSE                      : 0.07041312
5 day(s) R2                        : -2.99321786
5 day(s) Pearson r                 : 0.04776128
5 day(s) QLIKE                     : 0.08943115
full horizon MAE                   : 0.05646836
full horizon RMSE                  : 0.07041312
full horizon R2                    : -2.99321786
full horizon Pearson r             : 0.04776128
full horizon QLIKE                 : 0.08943115

--- Task 6 ---
1 day(s) MAE                       : 4.13323735
1 day(s) RMSE                      : 5.47672063
1 day(s) R2                        : -0.41189565
1 day(s) Pearson r                 : -0.10322093
1 day(s) QLIKE                     : 0.15613679
3 day(s) MAE                       : 4.13728725
3 day(s) RMSE                      : 5.47082995
3 day(s) R2                        : -0.40292940
3 day(s) Pearson r                 : -0.10676298
3 day(s) QLIKE                     : 0.15705181
5 day(s) MAE                       : 4.14777053
5 day(s) RMSE                      : 5.47838545
5 day(s) R2                        : -0.40090624
5 day(s) Pearson r                 : -0.10951048
5 day(s) QLIKE                     : 0.15836593
full horizon MAE                   : 4.14777053
full horizon RMSE                  : 5.47838545
full horizon R2                    : -0.40090624
full horizon Pearson r             : -0.10951048
full horizon QLIKE                 : 0.15836593

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_H5.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.47062, max=5.42287

=== MSFT | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.894907177625173
  Min value:  -4.875740826920772
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.894348562811063
  Min value:  -6.5517309160899515
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.930739994585198
  Min value:  -181.2597467460353
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 103s 412ms/step - loss: 0.7809 - val_loss: 0.5309 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 240ms/step - loss: 0.4505 - val_loss: 0.6013 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 239ms/step - loss: 0.3529 - val_loss: 0.5288 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 244ms/step - loss: 0.3280 - val_loss: 0.6104 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 250ms/step - loss: 0.3121 - val_loss: 0.5275 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 240ms/step - loss: 0.3035 - val_loss: 0.5741 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 12s 252ms/step - loss: 0.2977 - val_loss: 0.5831 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 239ms/step - loss: 0.2952 - val_loss: 0.6283 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 247ms/step - loss: 0.2875 - val_loss: 0.6308 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 232ms/step - loss: 0.2892
Epoch 10: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 240ms/step - loss: 0.2855 - val_loss: 0.6239 - learning_rate: 5.0000e-04
Epoch 10: early stopping
Restoring model weights from the end of the best epoch: 5.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.98036567
1 day(s) RMSE                      : 6.61727798
1 day(s) R2                        : 0.02534724
1 day(s) Pearson r                 : 0.23955943
1 day(s) QLIKE                     : 0.37725368
3 day(s) MAE                       : 2.09657640
3 day(s) RMSE                      : 6.65119007
3 day(s) R2                        : 0.01526604
3 day(s) Pearson r                 : 0.20438250
3 day(s) QLIKE                     : 0.40895173
5 day(s) MAE                       : 2.14978118
5 day(s) RMSE                      : 6.68645570
5 day(s) R2                        : 0.00490758
5 day(s) Pearson r                 : 0.16457869
5 day(s) QLIKE                     : 0.43332651
full horizon MAE                   : 2.14978118
full horizon RMSE                  : 6.68645570
full horizon R2                    : 0.00490758
full horizon Pearson r             : 0.16457869
full horizon QLIKE                 : 0.43332651

--- Task 2 ---
1 day(s) MAE                       : 0.02063631
1 day(s) RMSE                      : 0.03178594
1 day(s) R2                        : -0.06560738
1 day(s) Pearson r                 : 0.10381133
1 day(s) QLIKE                     : 13.32515492
3 day(s) MAE                       : 0.01967751
3 day(s) RMSE                      : 0.03358416
3 day(s) R2                        : -0.18958661
3 day(s) Pearson r                 : 0.07189903
3 day(s) QLIKE                     : 26.08101808
5 day(s) MAE                       : 0.01933705
5 day(s) RMSE                      : 0.03453911
5 day(s) R2                        : -0.25819918
5 day(s) Pearson r                 : 0.04870982
5 day(s) QLIKE                     : 26.49248102
full horizon MAE                   : 0.01933705
full horizon RMSE                  : 0.03453911
full horizon R2                    : -0.25819918
full horizon Pearson r             : 0.04870982
full horizon QLIKE                 : 26.49248102

--- Task 3 ---
1 day(s) MAE                       : 0.21854148
1 day(s) RMSE                      : 0.30355890
1 day(s) R2                        : -0.04065267
1 day(s) Pearson r                 : -0.09336665
1 day(s) QLIKE                     : 1.53511192
3 day(s) MAE                       : 0.21563843
3 day(s) RMSE                      : 0.31230766
3 day(s) R2                        : -0.10137298
3 day(s) Pearson r                 : -0.00255283
3 day(s) QLIKE                     : 1.53545152
5 day(s) MAE                       : 0.21425954
5 day(s) RMSE                      : 0.31559122
5 day(s) R2                        : -0.12448548
5 day(s) Pearson r                 : 0.01015167
5 day(s) QLIKE                     : 1.53532818
full horizon MAE                   : 0.21425954
full horizon RMSE                  : 0.31559122
full horizon R2                    : -0.12448548
full horizon Pearson r             : 0.01015167
full horizon QLIKE                 : 1.53532818

--- Task 4 ---
1 day(s) MAE                       : 0.36282261
1 day(s) RMSE                      : 0.60337449
1 day(s) R2                        : -0.38031738
1 day(s) Pearson r                 : -0.08608123
1 day(s) QLIKE                     : 2.56695935
3 day(s) MAE                       : 0.36236775
3 day(s) RMSE                      : 0.59545033
3 day(s) R2                        : -0.34432463
3 day(s) Pearson r                 : -0.09259660
3 day(s) QLIKE                     : 2.58017797
5 day(s) MAE                       : 0.35923935
5 day(s) RMSE                      : 0.58719914
5 day(s) R2                        : -0.30727278
5 day(s) Pearson r                 : -0.07097061
5 day(s) QLIKE                     : 2.57142537
full horizon MAE                   : 0.35923935
full horizon RMSE                  : 0.58719914
full horizon R2                    : -0.30727278
full horizon Pearson r             : -0.07097061
full horizon QLIKE                 : 2.57142537

--- Task 5 ---
1 day(s) MAE                       : 0.09905877
1 day(s) RMSE                      : 0.10514211
1 day(s) R2                        : -7.81350174
1 day(s) Pearson r                 : 0.19525103
1 day(s) QLIKE                     : 0.59820974
3 day(s) MAE                       : 0.09842530
3 day(s) RMSE                      : 0.10516443
3 day(s) R2                        : -7.86782471
3 day(s) Pearson r                 : 0.18780890
3 day(s) QLIKE                     : 9.11132831
5 day(s) MAE                       : 0.09382319
5 day(s) RMSE                      : 0.10358125
5 day(s) R2                        : -7.64128164
5 day(s) Pearson r                 : 0.13478433
5 day(s) QLIKE                     : 8.65549636
full horizon MAE                   : 0.09382319
full horizon RMSE                  : 0.10358125
full horizon R2                    : -7.64128164
full horizon Pearson r             : 0.13478433
full horizon QLIKE                 : 8.65549636

--- Task 6 ---
1 day(s) MAE                       : 4.22256008
1 day(s) RMSE                      : 5.62791062
1 day(s) R2                        : -0.49092504
1 day(s) Pearson r                 : -0.08280985
1 day(s) QLIKE                     : 0.18217767
3 day(s) MAE                       : 4.20813777
3 day(s) RMSE                      : 5.61196536
3 day(s) R2                        : -0.47624810
3 day(s) Pearson r                 : -0.09535613
3 day(s) QLIKE                     : 0.18626596
5 day(s) MAE                       : 4.27341987
5 day(s) RMSE                      : 5.66794150
5 day(s) R2                        : -0.49952814
5 day(s) Pearson r                 : -0.10160203
5 day(s) QLIKE                     : 0.19270526
full horizon MAE                   : 4.27341987
full horizon RMSE                  : 5.66794150
full horizon R2                    : -0.49952814
full horizon Pearson r             : -0.10160203
full horizon QLIKE                 : 0.19270526

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.03588, max=5.66104

=== MSFT | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.898543949269212
  Min value:  -4.873616493745474
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.895260784986747
  Min value:  -6.552726793435114
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9328875355250386
  Min value:  -181.27145471977147
Epoch 001 | phase=1 | train_loss=4.9319 | val_main=0.451001
Epoch 002 | phase=1 | train_loss=4.8942 | val_main=0.450999
Epoch 003 | phase=1 | train_loss=4.7950 | val_main=0.450947
Epoch 004 | phase=1 | train_loss=4.4929 | val_main=0.450689
Epoch 005 | phase=1 | train_loss=4.4594 | val_main=0.450529
Epoch 006 | phase=1 | train_loss=4.2565 | val_main=0.450570
Epoch 007 | phase=1 | train_loss=4.1498 | val_main=0.450507
Epoch 008 | phase=1 | train_loss=3.9095 | val_main=0.450845
Epoch 009 | phase=1 | train_loss=3.5291 | val_main=0.450791
Epoch 010 | phase=1 | train_loss=3.3822 | val_main=0.450848
Epoch 011 | phase=1 | train_loss=3.1015 | val_main=0.450701
Epoch 012 | phase=1 | train_loss=2.9987 | val_main=0.450675
Epoch 013 | phase=1 | train_loss=2.7387 | val_main=0.450872
Epoch 014 | phase=1 | train_loss=2.6643 | val_main=0.450999
Epoch 015 | phase=1 | train_loss=2.5903 | val_main=0.450970
Epoch 016 | phase=0 | train_loss=3.4731 | val_main=0.450999
Epoch 017 | phase=0 | train_loss=3.3669 | val_main=0.445380
Epoch 018 | phase=0 | train_loss=3.2484 | val_main=0.439788
Epoch 019 | phase=0 | train_loss=3.0746 | val_main=0.467828
Epoch 020 | phase=0 | train_loss=2.9311 | val_main=0.492395
Epoch 021 | phase=0 | train_loss=2.9176 | val_main=0.516377
Epoch 022 | phase=0 | train_loss=2.8413 | val_main=0.532712
Epoch 023 | phase=0 | train_loss=2.6822 | val_main=0.543461
Epoch 024 | phase=0 | train_loss=2.5503 | val_main=0.536404
Epoch 025 | phase=0 | train_loss=2.4472 | val_main=0.563700
Epoch 026 | phase=0 | train_loss=2.3855 | val_main=0.567693
Epoch 027 | phase=0 | train_loss=2.8886 | val_main=0.545082
Epoch 028 | phase=0 | train_loss=2.3808 | val_main=0.556759
Epoch 029 | phase=0 | train_loss=2.2280 | val_main=0.537515
Epoch 030 | phase=0 | train_loss=2.0416 | val_main=0.544788
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.05994218
1 day(s) RMSE                      : 6.75480872
1 day(s) R2                        : -0.01558732
1 day(s) Pearson r                 : 0.06498680
1 day(s) QLIKE                     : 0.49252290
3 day(s) MAE                       : 2.08826907
3 day(s) RMSE                      : 6.77253359
3 day(s) R2                        : -0.02099245
3 day(s) Pearson r                 : 0.04144024
3 day(s) QLIKE                     : 0.50364680
5 day(s) MAE                       : 2.11437073
5 day(s) RMSE                      : 6.80084718
5 day(s) R2                        : -0.02943162
5 day(s) Pearson r                 : 0.00362447
5 day(s) QLIKE                     : 0.52139784
10 day(s) MAE                      : 2.13950557
10 day(s) RMSE                     : 6.81474993
10 day(s) R2                       : -0.03356899
10 day(s) Pearson r                : -0.01936160
10 day(s) QLIKE                    : 0.51913530
full horizon MAE                   : 2.13950557
full horizon RMSE                  : 6.81474993
full horizon R2                    : -0.03356899
full horizon Pearson r             : -0.01936160
full horizon QLIKE                 : 0.51913530

--- Task 2 ---
1 day(s) MAE                       : 0.11644080
1 day(s) RMSE                      : 0.12634868
1 day(s) R2                        : -15.83714529
1 day(s) Pearson r                 : -0.38795880
1 day(s) QLIKE                     : 15.59223888
3 day(s) MAE                       : 0.11790015
3 day(s) RMSE                      : 0.12741750
3 day(s) R2                        : -16.12320895
3 day(s) Pearson r                 : -0.37447169
3 day(s) QLIKE                     : 15.42212447
5 day(s) MAE                       : 0.11290460
5 day(s) RMSE                      : 0.12263983
5 day(s) R2                        : -14.86317456
5 day(s) Pearson r                 : -0.36375938
5 day(s) QLIKE                     : 15.38085554
10 day(s) MAE                      : 0.10436352
10 day(s) RMSE                     : 0.11449461
10 day(s) R2                       : -12.82601827
10 day(s) Pearson r                : -0.33365478
10 day(s) QLIKE                    : 15.17646493
full horizon MAE                   : 0.10436352
full horizon RMSE                  : 0.11449461
full horizon R2                    : -12.82601827
full horizon Pearson r             : -0.33365478
full horizon QLIKE                 : 15.17646493

--- Task 3 ---
1 day(s) MAE                       : 0.23539319
1 day(s) RMSE                      : 0.30691593
1 day(s) R2                        : -0.06379689
1 day(s) Pearson r                 : -0.20733689
1 day(s) QLIKE                     : 1.53933859
3 day(s) MAE                       : 0.23503912
3 day(s) RMSE                      : 0.30688382
3 day(s) R2                        : -0.06345009
3 day(s) Pearson r                 : -0.20537529
3 day(s) QLIKE                     : 1.53931352
5 day(s) MAE                       : 0.23491299
5 day(s) RMSE                      : 0.30710162
5 day(s) R2                        : -0.06480049
5 day(s) Pearson r                 : -0.20891686
5 day(s) QLIKE                     : 1.53940358
10 day(s) MAE                      : 0.23405800
10 day(s) RMSE                     : 0.30673279
10 day(s) R2                       : -0.06182585
10 day(s) Pearson r                : -0.19593420
10 day(s) QLIKE                    : 1.53918451
full horizon MAE                   : 0.23405800
full horizon RMSE                  : 0.30673279
full horizon R2                    : -0.06182585
full horizon Pearson r             : -0.19593420
full horizon QLIKE                 : 1.53918451

--- Task 4 ---
1 day(s) MAE                       : 0.37932764
1 day(s) RMSE                      : 0.59811229
1 day(s) R2                        : -0.35634607
1 day(s) Pearson r                 : -0.26761431
1 day(s) QLIKE                     : 3.47474227
3 day(s) MAE                       : 0.37921058
3 day(s) RMSE                      : 0.59968921
3 day(s) R2                        : -0.36353263
3 day(s) Pearson r                 : -0.27620739
3 day(s) QLIKE                     : 3.44532146
5 day(s) MAE                       : 0.37926206
5 day(s) RMSE                      : 0.60086265
5 day(s) R2                        : -0.36881837
5 day(s) Pearson r                 : -0.27829093
5 day(s) QLIKE                     : 3.41553725
10 day(s) MAE                      : 0.37806249
10 day(s) RMSE                     : 0.60266757
10 day(s) R2                       : -0.37683651
10 day(s) Pearson r                : -0.26144637
10 day(s) QLIKE                    : 3.29775869
full horizon MAE                   : 0.37806249
full horizon RMSE                  : 0.60266757
full horizon R2                    : -0.37683651
full horizon Pearson r             : -0.26144637
full horizon QLIKE                 : 3.29775869

--- Task 5 ---
1 day(s) MAE                       : 0.05302796
1 day(s) RMSE                      : 0.06766426
1 day(s) R2                        : -2.65017596
1 day(s) Pearson r                 : 0.15579296
1 day(s) QLIKE                     : 0.10977132
3 day(s) MAE                       : 0.05299013
3 day(s) RMSE                      : 0.06815902
3 day(s) R2                        : -2.72499734
3 day(s) Pearson r                 : 0.12964519
3 day(s) QLIKE                     : 0.10896491
5 day(s) MAE                       : 0.05462641
5 day(s) RMSE                      : 0.07108693
5 day(s) R2                        : -3.07000891
5 day(s) Pearson r                 : 0.10016994
5 day(s) QLIKE                     : 0.10977313
10 day(s) MAE                      : 0.05403190
10 day(s) RMSE                     : 0.07059481
10 day(s) R2                       : -3.04097647
10 day(s) Pearson r                : 0.10155400
10 day(s) QLIKE                    : 0.11469595
full horizon MAE                   : 0.05403190
full horizon RMSE                  : 0.07059481
full horizon R2                    : -3.04097647
full horizon Pearson r             : 0.10155400
full horizon QLIKE                 : 0.11469595

--- Task 6 ---
1 day(s) MAE                       : 4.07316342
1 day(s) RMSE                      : 5.43676637
1 day(s) R2                        : -0.39137042
1 day(s) Pearson r                 : -0.07723529
1 day(s) QLIKE                     : 0.15565903
3 day(s) MAE                       : 4.07519902
3 day(s) RMSE                      : 5.42541427
3 day(s) R2                        : -0.37973347
3 day(s) Pearson r                 : -0.08146839
3 day(s) QLIKE                     : 0.15663308
5 day(s) MAE                       : 4.08002405
5 day(s) RMSE                      : 5.42795507
5 day(s) R2                        : -0.37523333
5 day(s) Pearson r                 : -0.08454208
5 day(s) QLIKE                     : 0.15814245
10 day(s) MAE                      : 4.11398643
10 day(s) RMSE                     : 5.45584405
10 day(s) R2                       : -0.37481595
10 day(s) Pearson r                : -0.09447324
10 day(s) QLIKE                    : 0.16207371
full horizon MAE                   : 4.11398643
full horizon RMSE                  : 5.45584405
full horizon R2                    : -0.37481595
full horizon Pearson r             : -0.09447324
full horizon QLIKE                 : 0.16207371

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_H10.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.41726, max=5.00394

=== MSFT | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.898543949269212
  Min value:  -4.873616493745474
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.895260784986747
  Min value:  -6.552726793435114
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9328875355250386
  Min value:  -181.27145471977147
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 94s 397ms/step - loss: 0.7314 - val_loss: 0.5029 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 247ms/step - loss: 0.3720 - val_loss: 0.6175 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 321ms/step - loss: 0.2873 - val_loss: 0.7338 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 16s 348ms/step - loss: 0.2638 - val_loss: 0.5871 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 329ms/step - loss: 0.2476 - val_loss: 0.6477 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 313ms/step - loss: 0.2472
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 323ms/step - loss: 0.2385 - val_loss: 0.7068 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.11804880
1 day(s) RMSE                      : 6.74060814
1 day(s) R2                        : -0.01132169
1 day(s) Pearson r                 : 0.26091232
1 day(s) QLIKE                     : 0.47176355
3 day(s) MAE                       : 2.17090216
3 day(s) RMSE                      : 6.72827842
3 day(s) R2                        : -0.00769268
3 day(s) Pearson r                 : 0.02745899
3 day(s) QLIKE                     : 0.47090628
5 day(s) MAE                       : 2.22431311
5 day(s) RMSE                      : 6.72202983
5 day(s) R2                        : -0.00570902
5 day(s) Pearson r                 : 0.01401513
5 day(s) QLIKE                     : 0.47205390
10 day(s) MAE                      : 2.34567283
10 day(s) RMSE                     : 6.71323546
10 day(s) R2                       : -0.00300566
10 day(s) Pearson r                : 0.00605339
10 day(s) QLIKE                    : 0.47443249
full horizon MAE                   : 2.34567283
full horizon RMSE                  : 6.71323546
full horizon R2                    : -0.00300566
full horizon Pearson r             : 0.00605339
full horizon QLIKE                 : 0.47443249

--- Task 2 ---
1 day(s) MAE                       : 0.01944005
1 day(s) RMSE                      : 0.03447548
1 day(s) R2                        : -0.25356729
1 day(s) Pearson r                 : 0.04354546
1 day(s) QLIKE                     : 13.11242010
3 day(s) MAE                       : 0.01917811
3 day(s) RMSE                      : 0.03511427
3 day(s) R2                        : -0.30045177
3 day(s) Pearson r                 : 0.00255811
3 day(s) QLIKE                     : 15.44142370
5 day(s) MAE                       : 0.01902542
5 day(s) RMSE                      : 0.03550190
5 day(s) R2                        : -0.32932210
5 day(s) Pearson r                 : 0.00148499
5 day(s) QLIKE                     : 19.21522452
10 day(s) MAE                      : 0.01891091
10 day(s) RMSE                     : 0.03578987
10 day(s) R2                       : -0.35097523
10 day(s) Pearson r                : 0.00091092
10 day(s) QLIKE                    : 17.77433360
full horizon MAE                   : 0.01891091
full horizon RMSE                  : 0.03578987
full horizon R2                    : -0.35097523
full horizon Pearson r             : 0.00091092
full horizon QLIKE                 : 17.77433360

--- Task 3 ---
1 day(s) MAE                       : 0.21804675
1 day(s) RMSE                      : 0.30217906
1 day(s) R2                        : -0.03121350
1 day(s) Pearson r                 : -0.05189082
1 day(s) QLIKE                     : 1.53488363
3 day(s) MAE                       : 0.21644290
3 day(s) RMSE                      : 0.30600275
3 day(s) R2                        : -0.05735252
3 day(s) Pearson r                 : -0.00335207
3 day(s) QLIKE                     : 1.53504425
5 day(s) MAE                       : 0.21539572
5 day(s) RMSE                      : 0.31326338
5 day(s) R2                        : -0.10795799
5 day(s) Pearson r                 : -0.00260003
5 day(s) QLIKE                     : 1.53556103
10 day(s) MAE                      : 0.21548148
10 day(s) RMSE                     : 0.32692067
10 day(s) R2                       : -0.20619534
10 day(s) Pearson r                : -0.00103685
10 day(s) QLIKE                    : 1.53609745
full horizon MAE                   : 0.21548148
full horizon RMSE                  : 0.32692067
full horizon R2                    : -0.20619534
full horizon Pearson r             : -0.00103685
full horizon QLIKE                 : 1.53609745

--- Task 4 ---
1 day(s) MAE                       : 0.36377273
1 day(s) RMSE                      : 0.61745612
1 day(s) R2                        : -0.44549725
1 day(s) Pearson r                 : -0.06520453
1 day(s) QLIKE                     : 2.41572773
3 day(s) MAE                       : 0.36827664
3 day(s) RMSE                      : 0.62501732
3 day(s) R2                        : -0.48114362
3 day(s) Pearson r                 : -0.00206695
3 day(s) QLIKE                     : 2.47082000
5 day(s) MAE                       : 0.37035569
5 day(s) RMSE                      : 0.62793196
5 day(s) R2                        : -0.49492905
5 day(s) Pearson r                 : -0.00353760
5 day(s) QLIKE                     : 2.47967409
10 day(s) MAE                      : 0.37406117
10 day(s) RMSE                     : 0.63216294
10 day(s) R2                       : -0.51490289
10 day(s) Pearson r                : -0.00204776
10 day(s) QLIKE                    : 2.62518115
full horizon MAE                   : 0.37406117
full horizon RMSE                  : 0.63216294
full horizon R2                    : -0.51490289
full horizon Pearson r             : -0.00204776
full horizon QLIKE                 : 2.62518115

--- Task 5 ---
1 day(s) MAE                       : 0.12346026
1 day(s) RMSE                      : 0.12840370
1 day(s) R2                        : -12.14468489
1 day(s) Pearson r                 : 0.13032235
1 day(s) QLIKE                     : 0.07835040
3 day(s) MAE                       : 0.12370110
3 day(s) RMSE                      : 0.12860083
3 day(s) R2                        : -12.26071172
3 day(s) Pearson r                 : 0.05550166
3 day(s) QLIKE                     : 0.08851274
5 day(s) MAE                       : 0.12465564
5 day(s) RMSE                      : 0.12950882
5 day(s) R2                        : -12.50873216
5 day(s) Pearson r                 : 0.02966255
5 day(s) QLIKE                     : 0.12637769
10 day(s) MAE                      : 0.12721281
10 day(s) RMSE                     : 0.13198207
10 day(s) R2                       : -13.12442191
10 day(s) Pearson r                : 0.00257644
10 day(s) QLIKE                    : 1.19121509
full horizon MAE                   : 0.12721281
full horizon RMSE                  : 0.13198207
full horizon R2                    : -13.12442191
full horizon Pearson r             : 0.00257644
full horizon QLIKE                 : 1.19121509

--- Task 6 ---
1 day(s) MAE                       : 3.49673293
1 day(s) RMSE                      : 4.68913420
1 day(s) R2                        : -0.03501525
1 day(s) Pearson r                 : -0.06193903
1 day(s) QLIKE                     : 0.14313466
3 day(s) MAE                       : 3.70897268
3 day(s) RMSE                      : 4.86365790
3 day(s) R2                        : -0.10880564
3 day(s) Pearson r                 : -0.00618381
3 day(s) QLIKE                     : 0.15296510
5 day(s) MAE                       : 5.46204709
5 day(s) RMSE                      : 6.82319798
5 day(s) R2                        : -1.17310067
5 day(s) Pearson r                 : -0.00602182
5 day(s) QLIKE                     : 0.18445616
10 day(s) MAE                      : 10.30444504
10 day(s) RMSE                     : 12.22188440
10 day(s) R2                       : -5.89917258
10 day(s) Pearson r                : -0.00912721
10 day(s) QLIKE                    : 0.23221389
full horizon MAE                   : 10.30444504
full horizon RMSE                  : 12.22188440
full horizon R2                    : -5.89917258
full horizon Pearson r             : -0.00912721
full horizon QLIKE                 : 0.23221389

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=2.46108, max=3.27167

=== MSFT | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.90673127244724
  Min value:  -4.869753815405025
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8916888397222356
  Min value:  -6.561724659722739
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9300069821373937
  Min value:  -181.45606985742796
Epoch 001 | phase=1 | train_loss=4.9332 | val_main=0.425176
Epoch 002 | phase=1 | train_loss=4.9002 | val_main=0.425179
Epoch 003 | phase=1 | train_loss=4.8421 | val_main=0.425156
Epoch 004 | phase=1 | train_loss=4.5307 | val_main=0.424818
Epoch 005 | phase=1 | train_loss=4.4701 | val_main=0.424535
Epoch 006 | phase=1 | train_loss=4.2419 | val_main=0.424733
Epoch 007 | phase=1 | train_loss=4.1402 | val_main=0.424733
Epoch 008 | phase=1 | train_loss=3.8685 | val_main=0.425054
Epoch 009 | phase=1 | train_loss=3.5741 | val_main=0.424977
Epoch 010 | phase=1 | train_loss=3.4012 | val_main=0.425126
Epoch 011 | phase=1 | train_loss=3.1451 | val_main=0.425065
Epoch 012 | phase=1 | train_loss=3.0649 | val_main=0.424925
Epoch 013 | phase=1 | train_loss=2.8159 | val_main=0.425108
Epoch 014 | phase=1 | train_loss=2.7693 | val_main=0.425183
Epoch 015 | phase=1 | train_loss=2.7085 | val_main=0.425189
Epoch 016 | phase=0 | train_loss=3.5951 | val_main=0.425742
Epoch 017 | phase=0 | train_loss=3.5045 | val_main=0.423737
Epoch 018 | phase=0 | train_loss=3.4105 | val_main=0.419710
Epoch 019 | phase=0 | train_loss=3.2499 | val_main=0.451063
Epoch 020 | phase=0 | train_loss=3.1324 | val_main=0.475064
Epoch 021 | phase=0 | train_loss=3.1562 | val_main=0.489739
Epoch 022 | phase=0 | train_loss=3.0883 | val_main=0.502088
Epoch 023 | phase=0 | train_loss=2.9975 | val_main=0.507449
Epoch 024 | phase=0 | train_loss=2.9872 | val_main=0.530195
Epoch 025 | phase=0 | train_loss=2.9733 | val_main=0.558238
Epoch 026 | phase=0 | train_loss=2.9358 | val_main=0.527366
Epoch 027 | phase=0 | train_loss=2.7707 | val_main=0.550965
Epoch 028 | phase=0 | train_loss=2.7176 | val_main=0.609336
Epoch 029 | phase=0 | train_loss=2.6449 | val_main=0.560673
Epoch 030 | phase=0 | train_loss=2.5541 | val_main=0.537875
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.07032996
1 day(s) RMSE                      : 6.75243695
1 day(s) R2                        : -0.01487426
1 day(s) Pearson r                 : 0.07028341
1 day(s) QLIKE                     : 0.48463290
3 day(s) MAE                       : 2.06625132
3 day(s) RMSE                      : 6.76502843
3 day(s) R2                        : -0.01873083
3 day(s) Pearson r                 : 0.04605737
3 day(s) QLIKE                     : 0.48598563
5 day(s) MAE                       : 2.08538531
5 day(s) RMSE                      : 6.78586626
5 day(s) R2                        : -0.02490135
5 day(s) Pearson r                 : 0.01262888
5 day(s) QLIKE                     : 0.49817016
10 day(s) MAE                      : 2.11895728
10 day(s) RMSE                     : 6.80664101
10 day(s) R2                       : -0.03111075
10 day(s) Pearson r                : -0.01740762
10 day(s) QLIKE                    : 0.50613154
20 day(s) MAE                      : 2.17312796
20 day(s) RMSE                     : 7.00877191
20 day(s) R2                       : -0.03016894
20 day(s) Pearson r                : -0.01399952
20 day(s) QLIKE                    : 0.50942814
full horizon MAE                   : 2.17312796
full horizon RMSE                  : 7.00877191
full horizon R2                    : -0.03016894
full horizon Pearson r             : -0.01399952
full horizon QLIKE                 : 0.50942814

--- Task 2 ---
1 day(s) MAE                       : 0.12175565
1 day(s) RMSE                      : 0.13456099
1 day(s) R2                        : -18.09701005
1 day(s) Pearson r                 : -0.36954693
1 day(s) QLIKE                     : 14.88330357
3 day(s) MAE                       : 0.12018755
3 day(s) RMSE                      : 0.13357548
3 day(s) R2                        : -17.81830486
3 day(s) Pearson r                 : -0.34839402
3 day(s) QLIKE                     : 14.82031616
5 day(s) MAE                       : 0.12096849
5 day(s) RMSE                      : 0.13467366
5 day(s) R2                        : -18.12900528
5 day(s) Pearson r                 : -0.33399897
5 day(s) QLIKE                     : 14.77835817
10 day(s) MAE                      : 0.11081658
10 day(s) RMSE                     : 0.12419445
10 day(s) R2                       : -15.26789558
10 day(s) Pearson r                : -0.28505602
10 day(s) QLIKE                    : 14.63115543
20 day(s) MAE                      : 0.09344805
20 day(s) RMSE                     : 0.10796179
20 day(s) R2                       : -11.30062717
20 day(s) Pearson r                : -0.17465136
20 day(s) QLIKE                    : 14.24978028
full horizon MAE                   : 0.09344805
full horizon RMSE                  : 0.10796179
full horizon R2                    : -11.30062717
full horizon Pearson r             : -0.17465136
full horizon QLIKE                 : 14.24978028

--- Task 3 ---
1 day(s) MAE                       : 0.23805396
1 day(s) RMSE                      : 0.30967289
1 day(s) R2                        : -0.08299445
1 day(s) Pearson r                 : -0.22335583
1 day(s) QLIKE                     : 1.54051584
3 day(s) MAE                       : 0.23743387
3 day(s) RMSE                      : 0.30974245
3 day(s) R2                        : -0.08335454
3 day(s) Pearson r                 : -0.22597406
3 day(s) QLIKE                     : 1.54052621
5 day(s) MAE                       : 0.23700739
5 day(s) RMSE                      : 0.30977584
5 day(s) R2                        : -0.08342569
5 day(s) Pearson r                 : -0.22562679
5 day(s) QLIKE                     : 1.54051977
10 day(s) MAE                      : 0.23554031
10 day(s) RMSE                     : 0.30889867
10 day(s) R2                       : -0.07687420
10 day(s) Pearson r                : -0.20427875
10 day(s) QLIKE                    : 1.54005809
20 day(s) MAE                      : 0.23284303
20 day(s) RMSE                     : 0.30806627
20 day(s) R2                       : -0.07104812
20 day(s) Pearson r                : -0.18062353
20 day(s) QLIKE                    : 1.53957974
full horizon MAE                   : 0.23284303
full horizon RMSE                  : 0.30806627
full horizon R2                    : -0.07104812
full horizon Pearson r             : -0.18062353
full horizon QLIKE                 : 1.53957974

--- Task 4 ---
1 day(s) MAE                       : 0.38070263
1 day(s) RMSE                      : 0.60856164
1 day(s) R2                        : -0.40415229
1 day(s) Pearson r                 : -0.32737690
1 day(s) QLIKE                     : 3.15913931
3 day(s) MAE                       : 0.38061183
3 day(s) RMSE                      : 0.60936838
3 day(s) R2                        : -0.40790355
3 day(s) Pearson r                 : -0.32674117
3 day(s) QLIKE                     : 3.14406199
5 day(s) MAE                       : 0.38053155
5 day(s) RMSE                      : 0.60950199
5 day(s) R2                        : -0.40846372
5 day(s) Pearson r                 : -0.31862185
5 day(s) QLIKE                     : 3.14414242
10 day(s) MAE                      : 0.37894013
10 day(s) RMSE                     : 0.60900300
10 day(s) R2                       : -0.40593613
10 day(s) Pearson r                : -0.28746183
10 day(s) QLIKE                    : 3.10015440
20 day(s) MAE                      : 0.37689161
20 day(s) RMSE                     : 0.61044123
20 day(s) R2                       : -0.41315953
20 day(s) Pearson r                : -0.27173998
20 day(s) QLIKE                    : 3.03407174
full horizon MAE                   : 0.37689161
full horizon RMSE                  : 0.61044123
full horizon R2                    : -0.41315953
full horizon Pearson r             : -0.27173998
full horizon QLIKE                 : 3.03407174

--- Task 5 ---
1 day(s) MAE                       : 0.04795582
1 day(s) RMSE                      : 0.06062639
1 day(s) R2                        : -1.93034298
1 day(s) Pearson r                 : 0.12717250
1 day(s) QLIKE                     : 0.13877042
3 day(s) MAE                       : 0.05675102
3 day(s) RMSE                      : 0.07467591
3 day(s) R2                        : -3.47136772
3 day(s) Pearson r                 : 0.10253219
3 day(s) QLIKE                     : 0.14744881
5 day(s) MAE                       : 0.06066590
5 day(s) RMSE                      : 0.07960991
5 day(s) R2                        : -4.10446376
5 day(s) Pearson r                 : 0.08720359
5 day(s) QLIKE                     : 0.16696606
10 day(s) MAE                      : 0.05743160
10 day(s) RMSE                     : 0.07387950
10 day(s) R2                       : -3.42576795
10 day(s) Pearson r                : 0.09247784
10 day(s) QLIKE                    : 0.17658716
20 day(s) MAE                      : 0.05822765
20 day(s) RMSE                     : 0.07362033
20 day(s) R2                       : -3.36106467
20 day(s) Pearson r                : 0.09174663
20 day(s) QLIKE                    : 0.23381683
full horizon MAE                   : 0.05822765
full horizon RMSE                  : 0.07362033
full horizon R2                    : -3.36106467
full horizon Pearson r             : 0.09174663
full horizon QLIKE                 : 0.23381683

--- Task 6 ---
1 day(s) MAE                       : 4.07569941
1 day(s) RMSE                      : 5.54103887
1 day(s) R2                        : -0.44525280
1 day(s) Pearson r                 : -0.05902290
1 day(s) QLIKE                     : 0.15899659
3 day(s) MAE                       : 4.10113847
3 day(s) RMSE                      : 5.56355318
3 day(s) R2                        : -0.45088795
3 day(s) Pearson r                 : -0.06112668
3 day(s) QLIKE                     : 0.16045445
5 day(s) MAE                       : 4.11147252
5 day(s) RMSE                      : 5.56425683
5 day(s) R2                        : -0.44516767
5 day(s) Pearson r                 : -0.06424020
5 day(s) QLIKE                     : 0.16161143
10 day(s) MAE                      : 4.15294003
10 day(s) RMSE                     : 5.60113909
10 day(s) R2                       : -0.44901666
10 day(s) Pearson r                : -0.07773766
10 day(s) QLIKE                    : 0.16596920
20 day(s) MAE                      : 4.25121070
20 day(s) RMSE                     : 5.67243087
20 day(s) R2                       : -0.45493012
20 day(s) Pearson r                : -0.10263408
20 day(s) QLIKE                    : 0.17492979
full horizon MAE                   : 4.25121070
full horizon RMSE                  : 5.67243087
full horizon R2                    : -0.45493012
full horizon Pearson r             : -0.10263408
full horizon QLIKE                 : 0.17492979

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_H20.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.52378, max=4.95886

=== MSFT | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.550928529745955
  Min value:  -0.49195094366183095
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.90673127244724
  Min value:  -4.869753815405025
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9099560750963094
  Min value:  -0.42477627271922275
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.8916888397222356
  Min value:  -6.561724659722739
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.030086059790822
  Min value:  -0.4794650148863312
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9300069821373937
  Min value:  -181.45606985742796
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 104s 584ms/step - loss: 0.7231 - val_loss: 0.4606 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 16s 350ms/step - loss: 0.3363 - val_loss: 0.6615 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 462ms/step - loss: 0.2532 - val_loss: 0.7196 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 461ms/step - loss: 0.2255 - val_loss: 0.5380 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 23s 491ms/step - loss: 0.2267 - val_loss: 0.4307 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 22s 479ms/step - loss: 0.2102 - val_loss: 0.4241 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 23s 495ms/step - loss: 0.2041 - val_loss: 0.5022 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 465ms/step - loss: 0.1938 - val_loss: 0.4864 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 462ms/step - loss: 0.1946 - val_loss: 0.9998 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 22s 482ms/step - loss: 0.1835 - val_loss: 0.4994 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 451ms/step - loss: 0.1763
Epoch 11: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 464ms/step - loss: 0.1742 - val_loss: 0.8783 - learning_rate: 5.0000e-04
Epoch 11: early stopping
Restoring model weights from the end of the best epoch: 6.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.98886518
1 day(s) RMSE                      : 6.76937606
1 day(s) R2                        : -0.01997245
1 day(s) Pearson r                 : 0.25823553
1 day(s) QLIKE                     : 0.45828279
3 day(s) MAE                       : 1.97812001
3 day(s) RMSE                      : 6.81766424
3 day(s) R2                        : -0.03464512
3 day(s) Pearson r                 : 0.02518825
3 day(s) QLIKE                     : 0.47323862
5 day(s) MAE                       : 1.98209058
5 day(s) RMSE                      : 6.83140741
5 day(s) R2                        : -0.03870410
5 day(s) Pearson r                 : 0.00886094
5 day(s) QLIKE                     : 0.47509543
10 day(s) MAE                      : 2.02694648
10 day(s) RMSE                     : 6.80434563
10 day(s) R2                       : -0.03041543
10 day(s) Pearson r                : 0.00528899
10 day(s) QLIKE                    : 0.47787671
20 day(s) MAE                      : 2.15855082
20 day(s) RMSE                     : 6.98159968
20 day(s) R2                       : -0.02219672
20 day(s) Pearson r                : 0.00694684
20 day(s) QLIKE                    : 0.49360606
full horizon MAE                   : 2.15855082
full horizon RMSE                  : 6.98159968
full horizon R2                    : -0.02219672
full horizon Pearson r             : 0.00694684
full horizon QLIKE                 : 0.49360606

--- Task 2 ---
1 day(s) MAE                       : 0.01958084
1 day(s) RMSE                      : 0.03417473
1 day(s) R2                        : -0.23179132
1 day(s) Pearson r                 : 0.05071413
1 day(s) QLIKE                     : 13.11202109
3 day(s) MAE                       : 0.01907710
3 day(s) RMSE                      : 0.03539899
3 day(s) R2                        : -0.32162685
3 day(s) Pearson r                 : 0.00210716
3 day(s) QLIKE                     : 17.10902322
5 day(s) MAE                       : 0.01896482
5 day(s) RMSE                      : 0.03567116
5 day(s) R2                        : -0.34202746
5 day(s) Pearson r                 : 0.00146058
5 day(s) QLIKE                     : 20.28693197
10 day(s) MAE                      : 0.01888060
10 day(s) RMSE                     : 0.03587392
10 day(s) R2                       : -0.35732791
10 day(s) Pearson r                : 0.00096332
10 day(s) QLIKE                    : 17.72364256
20 day(s) MAE                      : 0.01888520
20 day(s) RMSE                     : 0.03599142
20 day(s) R2                       : -0.36705198
20 day(s) Pearson r                : 0.00028542
20 day(s) QLIKE                    : 15.43411716
full horizon MAE                   : 0.01888520
full horizon RMSE                  : 0.03599142
full horizon R2                    : -0.36705198
full horizon Pearson r             : 0.00028542
full horizon QLIKE                 : 15.43411716

--- Task 3 ---
1 day(s) MAE                       : 0.22044988
1 day(s) RMSE                      : 0.29953173
1 day(s) R2                        : -0.01322410
1 day(s) Pearson r                 : 0.05768947
1 day(s) QLIKE                     : 1.53487512
3 day(s) MAE                       : 0.21819802
3 day(s) RMSE                      : 0.30249520
3 day(s) R2                        : -0.03325164
3 day(s) Pearson r                 : -0.00409841
3 day(s) QLIKE                     : 1.53504571
5 day(s) MAE                       : 0.21814106
5 day(s) RMSE                      : 0.30246197
5 day(s) R2                        : -0.03286981
5 day(s) Pearson r                 : -0.00862321
5 day(s) QLIKE                     : 1.53502820
10 day(s) MAE                      : 0.22041222
10 day(s) RMSE                     : 0.30033844
10 day(s) R2                       : -0.01801627
10 day(s) Pearson r                : 0.00914782
10 day(s) QLIKE                    : 1.53513238
20 day(s) MAE                      : 0.22444092
20 day(s) RMSE                     : 0.29902972
20 day(s) R2                       : -0.00913527
20 day(s) Pearson r                : 0.00448845
20 day(s) QLIKE                    : 1.53536035
full horizon MAE                   : 0.22444092
full horizon RMSE                  : 0.29902972
full horizon R2                    : -0.00913527
full horizon Pearson r             : 0.00448845
full horizon QLIKE                 : 1.53536035

--- Task 4 ---
1 day(s) MAE                       : 0.35981570
1 day(s) RMSE                      : 0.60808029
1 day(s) R2                        : -0.40193193
1 day(s) Pearson r                 : 0.04771480
1 day(s) QLIKE                     : 2.41482785
3 day(s) MAE                       : 0.36123164
3 day(s) RMSE                      : 0.61118439
3 day(s) R2                        : -0.41630759
3 day(s) Pearson r                 : -0.00059906
3 day(s) QLIKE                     : 2.43739071
5 day(s) MAE                       : 0.35785916
5 day(s) RMSE                      : 0.60037148
5 day(s) R2                        : -0.36658141
5 day(s) Pearson r                 : 0.00640399
5 day(s) QLIKE                     : 2.48997613
10 day(s) MAE                      : 0.35524244
10 day(s) RMSE                     : 0.56988657
10 day(s) R2                       : -0.23112905
10 day(s) Pearson r                : 0.00658217
10 day(s) QLIKE                    : 2.64725435
20 day(s) MAE                      : 0.39899784
20 day(s) RMSE                     : 0.55178461
20 day(s) R2                       : -0.15462945
20 day(s) Pearson r                : 0.00388012
20 day(s) QLIKE                    : 2.82999380
full horizon MAE                   : 0.39899784
full horizon RMSE                  : 0.55178461
full horizon R2                    : -0.15462945
full horizon Pearson r             : 0.00388012
full horizon QLIKE                 : 2.82999380

--- Task 5 ---
1 day(s) MAE                       : 0.12235100
1 day(s) RMSE                      : 0.12734595
1 day(s) R2                        : -11.92901339
1 day(s) Pearson r                 : -0.08861294
1 day(s) QLIKE                     : 0.08039153
3 day(s) MAE                       : 0.12107725
3 day(s) RMSE                      : 0.12620874
3 day(s) R2                        : -11.77197887
3 day(s) Pearson r                 : -0.01684983
3 day(s) QLIKE                     : 0.25737520
5 day(s) MAE                       : 0.12472748
5 day(s) RMSE                      : 0.12973706
5 day(s) R2                        : -12.55638708
5 day(s) Pearson r                 : -0.01055463
5 day(s) QLIKE                     : 6.93764917
10 day(s) MAE                      : 0.12753204
10 day(s) RMSE                     : 0.13237117
10 day(s) R2                       : -13.20782529
10 day(s) Pearson r                : -0.00772788
10 day(s) QLIKE                    : 3.80121026
20 day(s) MAE                      : 0.12876299
20 day(s) RMSE                     : 0.13355309
20 day(s) R2                       : -13.35174988
20 day(s) Pearson r                : -0.00380739
20 day(s) QLIKE                    : 1.97184411
full horizon MAE                   : 0.12876299
full horizon RMSE                  : 0.13355309
full horizon R2                    : -13.35174988
full horizon Pearson r             : -0.00380739
full horizon QLIKE                 : 1.97184411

--- Task 6 ---
1 day(s) MAE                       : 3.50439813
1 day(s) RMSE                      : 4.70069509
1 day(s) R2                        : -0.04012513
1 day(s) Pearson r                 : -0.04062261
1 day(s) QLIKE                     : 0.14310237
3 day(s) MAE                       : 3.54309113
3 day(s) RMSE                      : 4.72994684
3 day(s) R2                        : -0.04867739
3 day(s) Pearson r                 : -0.00027379
3 day(s) QLIKE                     : 0.14446786
5 day(s) MAE                       : 3.55322358
5 day(s) RMSE                      : 4.71696601
5 day(s) R2                        : -0.03855475
5 day(s) Pearson r                 : 0.00084018
5 day(s) QLIKE                     : 0.14789685
10 day(s) MAE                      : 3.94863327
10 day(s) RMSE                     : 5.09153196
10 day(s) R2                       : -0.19734034
10 day(s) Pearson r                : 0.00132221
10 day(s) QLIKE                    : 0.16129947
20 day(s) MAE                      : 5.12150836
20 day(s) RMSE                     : 6.34123074
20 day(s) R2                       : -0.81823839
20 day(s) Pearson r                : 0.00338737
20 day(s) QLIKE                    : 0.17850778
full horizon MAE                   : 5.12150836
full horizon RMSE                  : 6.34123074
full horizon R2                    : -0.81823839
full horizon Pearson r             : 0.00338737
full horizon QLIKE                 : 0.17850778

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/MSFT/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.27301, max=89.8516
Saved y_pred min=1.39824, max=2.99016

=== GE | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.030029259880507
  Min value:  -8.1422898936537
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0718644033108666
  Min value:  -68.7433710807441
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.646703896552387
  Min value:  -3.0804908670397175
Epoch 001 | phase=1 | train_loss=5.0710 | val_main=0.842711
Epoch 002 | phase=1 | train_loss=4.7646 | val_main=0.842600
Epoch 003 | phase=1 | train_loss=4.5321 | val_main=0.842775
Epoch 004 | phase=1 | train_loss=4.5756 | val_main=0.842752
Epoch 005 | phase=1 | train_loss=4.5848 | val_main=0.842871
Epoch 006 | phase=1 | train_loss=4.5434 | val_main=0.842617
Epoch 007 | phase=1 | train_loss=4.5911 | val_main=0.842696
Epoch 008 | phase=1 | train_loss=4.4970 | val_main=0.842798
Epoch 009 | phase=1 | train_loss=4.4980 | val_main=0.842865
Epoch 010 | phase=1 | train_loss=4.5037 | val_main=0.842897
Epoch 011 | phase=1 | train_loss=4.5152 | val_main=0.842853
Epoch 012 | phase=1 | train_loss=4.4820 | val_main=0.842826
Epoch 013 | phase=1 | train_loss=4.4902 | val_main=0.842879
Epoch 014 | phase=1 | train_loss=4.5328 | val_main=0.842760
Epoch 015 | phase=1 | train_loss=4.4615 | val_main=0.842848
Epoch 016 | phase=0 | train_loss=5.5637 | val_main=0.794035
Epoch 017 | phase=0 | train_loss=5.4957 | val_main=0.866750
Epoch 018 | phase=0 | train_loss=5.5295 | val_main=0.896227
Epoch 019 | phase=0 | train_loss=5.5653 | val_main=0.858292
Epoch 020 | phase=0 | train_loss=5.6043 | val_main=0.873546
Epoch 021 | phase=0 | train_loss=5.4805 | val_main=0.880784
Epoch 022 | phase=0 | train_loss=5.5055 | val_main=0.886490
Epoch 023 | phase=0 | train_loss=5.5762 | val_main=0.900653
Epoch 024 | phase=0 | train_loss=5.5275 | val_main=0.829073
Epoch 025 | phase=0 | train_loss=5.5597 | val_main=0.885910
Epoch 026 | phase=0 | train_loss=5.5787 | val_main=0.842028
Epoch 027 | phase=0 | train_loss=5.3252 | val_main=1.179669
Epoch 028 | phase=0 | train_loss=5.2558 | val_main=0.438044
Epoch 029 | phase=0 | train_loss=5.1978 | val_main=0.441264
Epoch 030 | phase=0 | train_loss=5.1526 | val_main=0.604834
Epoch 031 | phase=2 | train_loss=0.6096 | val_main=0.618603
Epoch 032 | phase=2 | train_loss=0.6397 | val_main=0.679918
Epoch 033 | phase=2 | train_loss=0.6083 | val_main=0.854173
Epoch 034 | phase=2 | train_loss=0.6119 | val_main=0.541600
Epoch 035 | phase=2 | train_loss=0.5788 | val_main=0.663367
Epoch 036 | phase=2 | train_loss=0.5569 | val_main=0.564477
Epoch 037 | phase=2 | train_loss=0.6752 | val_main=0.658690
Epoch 038 | phase=2 | train_loss=0.6002 | val_main=0.643394
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.12300584
1 day(s) RMSE                      : 27.56693370
1 day(s) R2                        : -0.01029016
1 day(s) Pearson r                 : 0.01215590
1 day(s) QLIKE                     : 0.69133027
full horizon MAE                   : 4.12300584
full horizon RMSE                  : 27.56693370
full horizon R2                    : -0.01029016
full horizon Pearson r             : 0.01215590
full horizon QLIKE                 : 0.69133027

--- Task 2 ---
1 day(s) MAE                       : 0.05021190
1 day(s) RMSE                      : 0.06219128
1 day(s) R2                        : -1.69440917
1 day(s) Pearson r                 : -0.03589597
1 day(s) QLIKE                     : 1.99235480
full horizon MAE                   : 0.05021190
full horizon RMSE                  : 0.06219128
full horizon R2                    : -1.69440917
full horizon Pearson r             : -0.03589597
full horizon QLIKE                 : 1.99235480

--- Task 3 ---
1 day(s) MAE                       : 0.75169579
1 day(s) RMSE                      : 0.77351799
1 day(s) R2                        : -16.88775226
1 day(s) Pearson r                 : 0.12575288
1 day(s) QLIKE                     : 0.10660118
full horizon MAE                   : 0.75169579
full horizon RMSE                  : 0.77351799
full horizon R2                    : -16.88775226
full horizon Pearson r             : 0.12575288
full horizon QLIKE                 : 0.10660118

--- Task 4 ---
1 day(s) MAE                       : 0.56401685
1 day(s) RMSE                      : 0.99354596
1 day(s) R2                        : -0.17351806
1 day(s) Pearson r                 : 0.10153469
1 day(s) QLIKE                     : 0.61852354
full horizon MAE                   : 0.56401685
full horizon RMSE                  : 0.99354596
full horizon R2                    : -0.17351806
full horizon Pearson r             : 0.10153469
full horizon QLIKE                 : 0.61852354

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067576
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
full horizon MAE                   : 0.23230417
full horizon RMSE                  : 0.23950211
full horizon R2                    : -15.89067576
full horizon Pearson r             : nan
full horizon QLIKE                 : 0.02992601

--- Task 6 ---
1 day(s) MAE                       : 1.52036156
1 day(s) RMSE                      : 1.91765234
1 day(s) R2                        : -0.00322258
1 day(s) Pearson r                 : -0.31067792
1 day(s) QLIKE                     : 0.08090927
full horizon MAE                   : 1.52036156
full horizon RMSE                  : 1.91765234
full horizon R2                    : -0.00322258
full horizon Pearson r             : -0.31067792
full horizon QLIKE                 : 0.08090927

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_H1.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=2.56524, max=9.73254

=== GE | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.030029259880507
  Min value:  -8.1422898936537
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0718644033108666
  Min value:  -68.7433710807441
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.646703896552387
  Min value:  -3.0804908670397175
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 103s 346ms/step - loss: 1.0123 - val_loss: 1.0138 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 7s 161ms/step - loss: 0.9989 - val_loss: 0.9955 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 182ms/step - loss: 0.9997 - val_loss: 1.0197 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 184ms/step - loss: 0.9996 - val_loss: 1.0497 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 183ms/step - loss: 0.9991 - val_loss: 1.0178 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 186ms/step - loss: 0.9999 - val_loss: 1.0462 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 179ms/step - loss: 1.0202
Epoch 7: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 185ms/step - loss: 0.9996 - val_loss: 1.0409 - learning_rate: 5.0000e-04
Epoch 7: early stopping
Restoring model weights from the end of the best epoch: 2.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.87094318
1 day(s) RMSE                      : 27.45119317
1 day(s) R2                        : -0.00182450
1 day(s) Pearson r                 : 0.04686902
1 day(s) QLIKE                     : 0.68436841
full horizon MAE                   : 4.87094318
full horizon RMSE                  : 27.45119317
full horizon R2                    : -0.00182450
full horizon Pearson r             : 0.04686902
full horizon QLIKE                 : 0.68436841

--- Task 2 ---
1 day(s) MAE                       : 0.04726677
1 day(s) RMSE                      : 0.05921011
1 day(s) R2                        : -1.44228490
1 day(s) Pearson r                 : 0.04266968
1 day(s) QLIKE                     : 1.99187496
full horizon MAE                   : 0.04726677
full horizon RMSE                  : 0.05921011
full horizon R2                    : -1.44228490
full horizon Pearson r             : 0.04266968
full horizon QLIKE                 : 1.99187496

--- Task 3 ---
1 day(s) MAE                       : 0.75403278
1 day(s) RMSE                      : 0.77582798
1 day(s) R2                        : -16.99474986
1 day(s) Pearson r                 : 0.04756052
1 day(s) QLIKE                     : 0.10599449
full horizon MAE                   : 0.75403278
full horizon RMSE                  : 0.77582798
full horizon R2                    : -16.99474986
full horizon Pearson r             : 0.04756052
full horizon QLIKE                 : 0.10599449

--- Task 4 ---
1 day(s) MAE                       : 0.56416489
1 day(s) RMSE                      : 0.97783457
1 day(s) R2                        : -0.13669678
1 day(s) Pearson r                 : 0.00111095
1 day(s) QLIKE                     : 0.61887840
full horizon MAE                   : 0.56416489
full horizon RMSE                  : 0.97783457
full horizon R2                    : -0.13669678
full horizon Pearson r             : 0.00111095
full horizon QLIKE                 : 0.61887840

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067577
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
full horizon MAE                   : 0.23230417
full horizon RMSE                  : 0.23950211
full horizon R2                    : -15.89067577
full horizon Pearson r             : nan
full horizon QLIKE                 : 0.02992601

--- Task 6 ---
1 day(s) MAE                       : 1.46652432
1 day(s) RMSE                      : 1.92290122
1 day(s) R2                        : -0.00872203
1 day(s) Pearson r                 : 0.10899651
1 day(s) QLIKE                     : 0.08064682
full horizon MAE                   : 1.46652432
full horizon RMSE                  : 1.92290122
full horizon R2                    : -0.00872203
full horizon Pearson r             : 0.10899651
full horizon QLIKE                 : 0.08064682

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=4.74942, max=4.75008

=== GE | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.05628901565783
  Min value:  -8.143860691767992
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0759413314511823
  Min value:  -68.74229956296557
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.658989486293699
  Min value:  -3.0732655627744725
Epoch 001 | phase=1 | train_loss=5.0952 | val_main=0.867658
Epoch 002 | phase=1 | train_loss=4.9363 | val_main=0.867721
Epoch 003 | phase=1 | train_loss=4.5525 | val_main=0.867845
Epoch 004 | phase=1 | train_loss=4.5616 | val_main=0.867784
Epoch 005 | phase=1 | train_loss=4.5859 | val_main=0.867760
Epoch 006 | phase=1 | train_loss=4.5461 | val_main=0.867722
Epoch 007 | phase=1 | train_loss=4.5756 | val_main=0.867722
Epoch 008 | phase=1 | train_loss=4.5846 | val_main=0.867731
Epoch 009 | phase=1 | train_loss=4.5034 | val_main=0.867746
Epoch 010 | phase=1 | train_loss=4.5166 | val_main=0.867771
Epoch 011 | phase=1 | train_loss=4.5179 | val_main=0.867754
Epoch 012 | phase=1 | train_loss=4.4989 | val_main=0.867738
Epoch 013 | phase=1 | train_loss=4.5151 | val_main=0.867742
Epoch 014 | phase=1 | train_loss=4.5484 | val_main=0.867720
Epoch 015 | phase=1 | train_loss=4.4582 | val_main=0.867731
Epoch 016 | phase=0 | train_loss=5.5785 | val_main=0.852679
Epoch 017 | phase=0 | train_loss=5.5946 | val_main=0.882634
Epoch 018 | phase=0 | train_loss=5.5319 | val_main=0.916721
Epoch 019 | phase=0 | train_loss=5.5679 | val_main=0.883187
Epoch 020 | phase=0 | train_loss=5.6231 | val_main=0.895322
Epoch 021 | phase=0 | train_loss=5.5177 | val_main=0.907959
Epoch 022 | phase=0 | train_loss=5.5440 | val_main=0.904905
Epoch 023 | phase=0 | train_loss=5.5521 | val_main=0.919954
Epoch 024 | phase=0 | train_loss=5.5140 | val_main=0.879151
Epoch 025 | phase=0 | train_loss=5.4517 | val_main=0.775710
Epoch 026 | phase=0 | train_loss=5.3584 | val_main=0.642987
Epoch 027 | phase=0 | train_loss=5.1670 | val_main=0.560745
Epoch 028 | phase=0 | train_loss=5.1984 | val_main=0.528654
Epoch 029 | phase=0 | train_loss=5.1512 | val_main=0.594116
Epoch 030 | phase=0 | train_loss=5.1045 | val_main=0.663784
Epoch 031 | phase=2 | train_loss=0.6572 | val_main=0.578249
Epoch 032 | phase=2 | train_loss=0.6440 | val_main=0.804896
Epoch 033 | phase=2 | train_loss=0.6400 | val_main=0.640228
Epoch 034 | phase=2 | train_loss=0.6040 | val_main=0.551303
Epoch 035 | phase=2 | train_loss=0.6084 | val_main=0.647560
Epoch 036 | phase=2 | train_loss=0.5857 | val_main=0.535005
Epoch 037 | phase=2 | train_loss=0.6273 | val_main=0.633939
Epoch 038 | phase=2 | train_loss=0.5835 | val_main=0.538227
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.25232748
1 day(s) RMSE                      : 27.62316197
1 day(s) R2                        : -0.01441574
1 day(s) Pearson r                 : 0.01692081
1 day(s) QLIKE                     : 0.72270956
3 day(s) MAE                       : 4.28871779
3 day(s) RMSE                      : 27.63199939
3 day(s) R2                        : -0.01512333
3 day(s) Pearson r                 : 0.00767785
3 day(s) QLIKE                     : 0.73446514
5 day(s) MAE                       : 4.32651428
5 day(s) RMSE                      : 27.63859680
5 day(s) R2                        : -0.01566630
5 day(s) Pearson r                 : 0.00157877
5 day(s) QLIKE                     : 0.74317571
full horizon MAE                   : 4.32651428
full horizon RMSE                  : 27.63859680
full horizon R2                    : -0.01566630
full horizon Pearson r             : 0.00157877
full horizon QLIKE                 : 0.74317571

--- Task 2 ---
1 day(s) MAE                       : 0.05034019
1 day(s) RMSE                      : 0.06231861
1 day(s) R2                        : -1.70545341
1 day(s) Pearson r                 : -0.00731574
1 day(s) QLIKE                     : 1.99197639
3 day(s) MAE                       : 0.05058089
3 day(s) RMSE                      : 0.06254951
3 day(s) R2                        : -1.71860472
3 day(s) Pearson r                 : 0.00499071
3 day(s) QLIKE                     : 1.93427160
5 day(s) MAE                       : 0.05091395
5 day(s) RMSE                      : 0.06287211
5 day(s) R2                        : -1.74031101
5 day(s) Pearson r                 : -0.00345166
5 day(s) QLIKE                     : 1.87690970
full horizon MAE                   : 0.05091395
full horizon RMSE                  : 0.06287211
full horizon R2                    : -1.74031101
full horizon Pearson r             : -0.00345166
full horizon QLIKE                 : 1.87690970

--- Task 3 ---
1 day(s) MAE                       : 0.75160391
1 day(s) RMSE                      : 0.77342339
1 day(s) R2                        : -16.88337715
1 day(s) Pearson r                 : 0.12263347
1 day(s) QLIKE                     : 0.10783720
3 day(s) MAE                       : 0.75087743
3 day(s) RMSE                      : 0.77265497
3 day(s) R2                        : -16.90212633
3 day(s) Pearson r                 : 0.11116016
3 day(s) QLIKE                     : 0.10829613
5 day(s) MAE                       : 0.75008868
5 day(s) RMSE                      : 0.77182233
5 day(s) R2                        : -16.91932906
5 day(s) Pearson r                 : 0.11001806
5 day(s) QLIKE                     : 0.10807462
full horizon MAE                   : 0.75008868
full horizon RMSE                  : 0.77182233
full horizon R2                    : -16.91932906
full horizon Pearson r             : 0.11001806
full horizon QLIKE                 : 0.10807462

--- Task 4 ---
1 day(s) MAE                       : 0.56439930
1 day(s) RMSE                      : 0.99612561
1 day(s) R2                        : -0.17961983
1 day(s) Pearson r                 : 0.09254045
1 day(s) QLIKE                     : 0.61856579
3 day(s) MAE                       : 0.56571367
3 day(s) RMSE                      : 0.99636728
3 day(s) R2                        : -0.18117582
3 day(s) Pearson r                 : 0.05829948
3 day(s) QLIKE                     : 0.61116607
5 day(s) MAE                       : 0.56699166
5 day(s) RMSE                      : 0.99675770
5 day(s) R2                        : -0.18319605
5 day(s) Pearson r                 : 0.04958130
5 day(s) QLIKE                     : 0.60445765
full horizon MAE                   : 0.56699166
full horizon RMSE                  : 0.99675770
full horizon R2                    : -0.18319605
full horizon Pearson r             : 0.04958130
full horizon QLIKE                 : 0.60445765

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067576
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
3 day(s) MAE                       : 0.23234204
3 day(s) RMSE                      : 0.23953830
3 day(s) R2                        : -15.89704844
3 day(s) Pearson r                 : -0.00775954
3 day(s) QLIKE                     : 0.02992349
5 day(s) MAE                       : 0.23243579
5 day(s) RMSE                      : 0.23962873
5 day(s) R2                        : -15.91100721
5 day(s) Pearson r                 : -0.00982113
5 day(s) QLIKE                     : 0.02991979
full horizon MAE                   : 0.23243579
full horizon RMSE                  : 0.23962873
full horizon R2                    : -15.91100721
full horizon Pearson r             : -0.00982113
full horizon QLIKE                 : 0.02991979

--- Task 6 ---
1 day(s) MAE                       : 1.52324837
1 day(s) RMSE                      : 1.91833153
1 day(s) R2                        : -0.00393335
1 day(s) Pearson r                 : -0.33402342
1 day(s) QLIKE                     : 0.08098887
3 day(s) MAE                       : 1.52220385
3 day(s) RMSE                      : 1.91895429
3 day(s) R2                        : -0.00416266
3 day(s) Pearson r                 : -0.31603402
3 day(s) QLIKE                     : 0.08111398
5 day(s) MAE                       : 1.52298537
5 day(s) RMSE                      : 1.91911238
5 day(s) R2                        : -0.00396300
5 day(s) Pearson r                 : -0.31823166
5 day(s) QLIKE                     : 0.08120334
full horizon MAE                   : 1.52298537
full horizon RMSE                  : 1.91911238
full horizon R2                    : -0.00396300
full horizon Pearson r             : -0.31823166
full horizon QLIKE                 : 0.08120334

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_H5.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.86163, max=14.0646

=== GE | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.05628901565783
  Min value:  -8.143860691767992
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.0759413314511823
  Min value:  -68.74229956296557
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.658989486293699
  Min value:  -3.0732655627744725
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 292s 523ms/step - loss: 0.7962 - val_loss: 0.9396 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 201ms/step - loss: 0.4696 - val_loss: 0.7999 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 237ms/step - loss: 0.4058 - val_loss: 0.6875 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 236ms/step - loss: 0.3903 - val_loss: 0.8477 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 236ms/step - loss: 0.3913 - val_loss: 0.6796 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 236ms/step - loss: 0.3778 - val_loss: 0.8743 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 237ms/step - loss: 0.3704 - val_loss: 0.9267 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 238ms/step - loss: 0.3645 - val_loss: 0.8520 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 235ms/step - loss: 0.3641 - val_loss: 1.0154 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 229ms/step - loss: 0.3749
Epoch 10: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 236ms/step - loss: 0.3607 - val_loss: 0.8607 - learning_rate: 5.0000e-04
Epoch 10: early stopping
Restoring model weights from the end of the best epoch: 5.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.98183244
1 day(s) RMSE                      : 27.44374771
1 day(s) R2                        : -0.00128114
1 day(s) Pearson r                 : 0.02482704
1 day(s) QLIKE                     : 0.68436680
3 day(s) MAE                       : 4.53034841
3 day(s) RMSE                      : 27.51211268
3 day(s) R2                        : -0.00633383
3 day(s) Pearson r                 : -0.00024956
3 day(s) QLIKE                     : 0.70409702
5 day(s) MAE                       : 4.52386564
5 day(s) RMSE                      : 27.50298383
5 day(s) R2                        : -0.00572371
5 day(s) Pearson r                 : -0.00012191
5 day(s) QLIKE                     : 0.69526227
full horizon MAE                   : 4.52386564
full horizon RMSE                  : 27.50298383
full horizon R2                    : -0.00572371
full horizon Pearson r             : -0.00012191
full horizon QLIKE                 : 0.69526227

--- Task 2 ---
1 day(s) MAE                       : 0.04693067
1 day(s) RMSE                      : 0.05887640
1 day(s) R2                        : -1.41483273
1 day(s) Pearson r                 : -0.04923871
1 day(s) QLIKE                     : 1.99188176
3 day(s) MAE                       : 0.05150931
3 day(s) RMSE                      : 0.06354344
3 day(s) R2                        : -1.80569007
3 day(s) Pearson r                 : -0.00598215
3 day(s) QLIKE                     : 5.48770888
5 day(s) MAE                       : 0.05291375
5 day(s) RMSE                      : 0.06490337
5 day(s) R2                        : -1.92023825
5 day(s) Pearson r                 : -0.00873285
5 day(s) QLIKE                     : 8.99527059
full horizon MAE                   : 0.05291375
full horizon RMSE                  : 0.06490337
full horizon R2                    : -1.92023825
full horizon Pearson r             : -0.00873285
full horizon QLIKE                 : 8.99527059

--- Task 3 ---
1 day(s) MAE                       : 0.74973768
1 day(s) RMSE                      : 0.77161288
1 day(s) R2                        : -16.79974887
1 day(s) Pearson r                 : -0.01092518
1 day(s) QLIKE                     : 0.10599550
3 day(s) MAE                       : 0.75641699
3 day(s) RMSE                      : 0.77812578
3 day(s) R2                        : -17.15653703
3 day(s) Pearson r                 : 0.00324080
3 day(s) QLIKE                     : 3.92358131
5 day(s) MAE                       : 0.75711023
5 day(s) RMSE                      : 0.77874180
5 day(s) R2                        : -17.24206667
5 day(s) Pearson r                 : 0.00454460
5 day(s) QLIKE                     : 2.54148353
full horizon MAE                   : 0.75711023
full horizon RMSE                  : 0.77874180
full horizon R2                    : -17.24206667
full horizon Pearson r             : 0.00454460
full horizon QLIKE                 : 2.54148353

--- Task 4 ---
1 day(s) MAE                       : 0.56565902
1 day(s) RMSE                      : 1.00282829
1 day(s) R2                        : -0.19554798
1 day(s) Pearson r                 : -0.01086371
1 day(s) QLIKE                     : 0.61887970
3 day(s) MAE                       : 0.61972148
3 day(s) RMSE                      : 1.06991295
3 day(s) R2                        : -0.36198563
3 day(s) Pearson r                 : -0.00289837
3 day(s) QLIKE                     : 0.66594440
5 day(s) MAE                       : 0.65900873
5 day(s) RMSE                      : 1.10463669
5 day(s) R2                        : -0.45317006
5 day(s) Pearson r                 : -0.00481072
5 day(s) QLIKE                     : 0.70358296
full horizon MAE                   : 0.65900873
full horizon RMSE                  : 1.10463669
full horizon R2                    : -0.45317006
full horizon Pearson r             : -0.00481072
full horizon QLIKE                 : 0.70358296

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067575
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
3 day(s) MAE                       : 0.23234204
3 day(s) RMSE                      : 0.23953830
3 day(s) R2                        : -15.89704832
3 day(s) Pearson r                 : -0.00001143
3 day(s) QLIKE                     : 0.02992349
5 day(s) MAE                       : 0.22426692
5 day(s) RMSE                      : 0.23228089
5 day(s) R2                        : -14.88980780
5 day(s) Pearson r                 : 0.00183518
5 day(s) QLIKE                     : 2.88763051
full horizon MAE                   : 0.22426692
full horizon RMSE                  : 0.23228089
full horizon R2                    : -14.88980780
full horizon Pearson r             : 0.00183518
full horizon QLIKE                 : 2.88763051

--- Task 6 ---
1 day(s) MAE                       : 1.46581179
1 day(s) RMSE                      : 1.92306419
1 day(s) R2                        : -0.00889301
1 day(s) Pearson r                 : 0.12480872
1 day(s) QLIKE                     : 0.08064555
3 day(s) MAE                       : 1.42406443
3 day(s) RMSE                      : 1.94094621
3 day(s) R2                        : -0.02731070
3 day(s) Pearson r                 : 0.00306042
3 day(s) QLIKE                     : 0.08097039
5 day(s) MAE                       : 1.40532750
5 day(s) RMSE                      : 1.95584484
5 day(s) R2                        : -0.04276319
5 day(s) Pearson r                 : 0.00544966
5 day(s) QLIKE                     : 0.08125079
full horizon MAE                   : 1.40532750
full horizon RMSE                  : 1.95584484
full horizon R2                    : -0.04276319
full horizon Pearson r             : 0.00544966
full horizon QLIKE                 : 0.08125079

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=2.98422, max=4.94167

=== GE | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.08059546680979
  Min value:  -8.14578298619116
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.079343715915981
  Min value:  -68.74402590450526
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.670127332275285
  Min value:  -3.0551258580114347
Epoch 001 | phase=1 | train_loss=5.0877 | val_main=0.895029
Epoch 002 | phase=1 | train_loss=4.9364 | val_main=0.894988
Epoch 003 | phase=1 | train_loss=4.6067 | val_main=0.894868
Epoch 004 | phase=1 | train_loss=4.5586 | val_main=0.894927
Epoch 005 | phase=1 | train_loss=4.6137 | val_main=0.894852
Epoch 006 | phase=1 | train_loss=4.5615 | val_main=0.894955
Epoch 007 | phase=1 | train_loss=4.5752 | val_main=0.894936
Epoch 008 | phase=1 | train_loss=4.6092 | val_main=0.894919
Epoch 009 | phase=1 | train_loss=4.5144 | val_main=0.894881
Epoch 010 | phase=1 | train_loss=4.5313 | val_main=0.894833
Epoch 011 | phase=1 | train_loss=4.5185 | val_main=0.894866
Epoch 012 | phase=1 | train_loss=4.5140 | val_main=0.894888
Epoch 013 | phase=1 | train_loss=4.5308 | val_main=0.894862
Epoch 014 | phase=1 | train_loss=4.5619 | val_main=0.894951
Epoch 015 | phase=1 | train_loss=4.4766 | val_main=0.894956
Epoch 016 | phase=0 | train_loss=5.5831 | val_main=0.890596
Epoch 017 | phase=0 | train_loss=5.5683 | val_main=0.917878
Epoch 018 | phase=0 | train_loss=5.5357 | val_main=0.947994
Epoch 019 | phase=0 | train_loss=5.5790 | val_main=0.917118
Epoch 020 | phase=0 | train_loss=5.5944 | val_main=0.925617
Epoch 021 | phase=0 | train_loss=5.5631 | val_main=0.937675
Epoch 022 | phase=0 | train_loss=5.5438 | val_main=0.940899
Epoch 023 | phase=0 | train_loss=5.5395 | val_main=0.941041
Epoch 024 | phase=0 | train_loss=5.5291 | val_main=0.907607
Epoch 025 | phase=0 | train_loss=5.4958 | val_main=0.753905
Epoch 026 | phase=0 | train_loss=5.3633 | val_main=0.785887
Epoch 027 | phase=0 | train_loss=5.2053 | val_main=0.708950
Epoch 028 | phase=0 | train_loss=5.1785 | val_main=0.828178
Epoch 029 | phase=0 | train_loss=5.0856 | val_main=0.562351
Epoch 030 | phase=0 | train_loss=4.9877 | val_main=0.566214
Epoch 031 | phase=2 | train_loss=0.6909 | val_main=0.790834
Epoch 032 | phase=2 | train_loss=0.6782 | val_main=0.708562
Epoch 033 | phase=2 | train_loss=0.6644 | val_main=0.598293
Epoch 034 | phase=2 | train_loss=0.6319 | val_main=0.547346
Epoch 035 | phase=2 | train_loss=0.6243 | val_main=0.697069
Epoch 036 | phase=2 | train_loss=0.6140 | val_main=0.528479
Epoch 037 | phase=2 | train_loss=0.6491 | val_main=0.665404
Epoch 038 | phase=2 | train_loss=0.6118 | val_main=0.577324
Epoch 039 | phase=2 | train_loss=0.6112 | val_main=0.570082
Epoch 040 | phase=2 | train_loss=0.6017 | val_main=0.668157
Epoch 041 | phase=2 | train_loss=0.6181 | val_main=0.739373
Epoch 042 | phase=2 | train_loss=0.6270 | val_main=0.713053
Epoch 043 | phase=2 | train_loss=0.6081 | val_main=0.527070
Epoch 044 | phase=2 | train_loss=0.6081 | val_main=0.759082
Epoch 045 | phase=2 | train_loss=0.5988 | val_main=0.632046
Epoch 046 | phase=2 | train_loss=0.5930 | val_main=0.584172
Epoch 047 | phase=2 | train_loss=0.6121 | val_main=0.640254
Epoch 048 | phase=2 | train_loss=0.6065 | val_main=0.655024
Epoch 049 | phase=2 | train_loss=0.5968 | val_main=0.627519
Epoch 050 | phase=2 | train_loss=0.5819 | val_main=0.690952

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.25488321
1 day(s) RMSE                      : 27.57625544
1 day(s) R2                        : -0.01097353
1 day(s) Pearson r                 : 0.03957808
1 day(s) QLIKE                     : 0.70596503
3 day(s) MAE                       : 4.32024049
3 day(s) RMSE                      : 27.60065759
3 day(s) R2                        : -0.01282182
3 day(s) Pearson r                 : 0.02563967
3 day(s) QLIKE                     : 0.72697813
5 day(s) MAE                       : 4.36985180
5 day(s) RMSE                      : 27.61791981
5 day(s) R2                        : -0.01414719
5 day(s) Pearson r                 : 0.01598163
5 day(s) QLIKE                     : 0.74136460
10 day(s) MAE                      : 4.45905872
10 day(s) RMSE                     : 27.63456302
10 day(s) R2                       : -0.01522085
10 day(s) Pearson r                : 0.00582403
10 day(s) QLIKE                    : 0.76284899
full horizon MAE                   : 4.45905872
full horizon RMSE                  : 27.63456302
full horizon R2                    : -0.01522085
full horizon Pearson r             : 0.00582403
full horizon QLIKE                 : 0.76284899

--- Task 2 ---
1 day(s) MAE                       : 0.05073575
1 day(s) RMSE                      : 0.06284417
1 day(s) R2                        : -1.75127794
1 day(s) Pearson r                 : -0.13408188
1 day(s) QLIKE                     : 2.01134565
3 day(s) MAE                       : 0.05101306
3 day(s) RMSE                      : 0.06307197
3 day(s) R2                        : -1.76421028
3 day(s) Pearson r                 : -0.13358261
3 day(s) QLIKE                     : 1.95233694
5 day(s) MAE                       : 0.05134241
5 day(s) RMSE                      : 0.06337672
5 day(s) R2                        : -1.78447474
5 day(s) Pearson r                 : -0.13323284
5 day(s) QLIKE                     : 1.89536137
10 day(s) MAE                      : 0.05228562
10 day(s) RMSE                     : 0.06429802
10 day(s) R2                       : -1.84491257
10 day(s) Pearson r                : -0.10299095
10 day(s) QLIKE                    : 1.76850636
full horizon MAE                   : 0.05228562
full horizon RMSE                  : 0.06429802
full horizon R2                    : -1.84491257
full horizon Pearson r             : -0.10299095
full horizon QLIKE                 : 1.76850636

--- Task 3 ---
1 day(s) MAE                       : 0.75196076
1 day(s) RMSE                      : 0.77351383
1 day(s) R2                        : -16.88755988
1 day(s) Pearson r                 : 0.14773393
1 day(s) QLIKE                     : 0.31000201
3 day(s) MAE                       : 0.75111297
3 day(s) RMSE                      : 0.77269566
3 day(s) R2                        : -16.90401190
3 day(s) Pearson r                 : 0.12388070
3 day(s) QLIKE                     : 0.30589960
5 day(s) MAE                       : 0.75026192
5 day(s) RMSE                      : 0.77180721
5 day(s) R2                        : -16.91862692
5 day(s) Pearson r                 : 0.12179292
5 day(s) QLIKE                     : 0.30398838
10 day(s) MAE                      : 0.74843740
10 day(s) RMSE                     : 0.76988052
10 day(s) R2                       : -16.98854503
10 day(s) Pearson r                : 0.11026435
10 day(s) QLIKE                    : 0.30442294
full horizon MAE                   : 0.74843740
full horizon RMSE                  : 0.76988052
full horizon R2                    : -16.98854503
full horizon Pearson r             : 0.11026435
full horizon QLIKE                 : 0.30442294

--- Task 4 ---
1 day(s) MAE                       : 0.56484725
1 day(s) RMSE                      : 0.99955626
1 day(s) R2                        : -0.18775904
1 day(s) Pearson r                 : 0.05570944
1 day(s) QLIKE                     : 0.61827089
3 day(s) MAE                       : 0.56614034
3 day(s) RMSE                      : 0.99960804
3 day(s) R2                        : -0.18887206
3 day(s) Pearson r                 : 0.05400108
3 day(s) QLIKE                     : 0.61078232
5 day(s) MAE                       : 0.56750615
5 day(s) RMSE                      : 1.00029456
5 day(s) R2                        : -0.19160779
5 day(s) Pearson r                 : 0.05837430
5 day(s) QLIKE                     : 0.60406199
10 day(s) MAE                      : 0.57087809
10 day(s) RMSE                     : 1.00227527
10 day(s) R2                       : -0.19948120
10 day(s) Pearson r                : 0.06760120
10 day(s) QLIKE                    : 0.58268537
full horizon MAE                   : 0.57087809
full horizon RMSE                  : 1.00227527
full horizon R2                    : -0.19948120
full horizon Pearson r             : 0.06760120
full horizon QLIKE                 : 0.58268537

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067576
1 day(s) Pearson r                 : -0.02201168
1 day(s) QLIKE                     : 0.02992601
3 day(s) MAE                       : 0.23234204
3 day(s) RMSE                      : 0.23953830
3 day(s) R2                        : -15.89704844
3 day(s) Pearson r                 : -0.02923505
3 day(s) QLIKE                     : 0.02992349
5 day(s) MAE                       : 0.23243579
5 day(s) RMSE                      : 0.23962873
5 day(s) R2                        : -15.91100721
5 day(s) Pearson r                 : -0.02391884
5 day(s) QLIKE                     : 0.02991979
10 day(s) MAE                      : 0.23282017
10 day(s) RMSE                     : 0.24002700
10 day(s) R2                       : -15.90655724
10 day(s) Pearson r                : -0.01406606
10 day(s) QLIKE                    : 0.03001085
full horizon MAE                   : 0.23282017
full horizon RMSE                  : 0.24002700
full horizon R2                    : -15.90655724
full horizon Pearson r             : -0.01406606
full horizon QLIKE                 : 0.03001085

--- Task 6 ---
1 day(s) MAE                       : 1.70010573
1 day(s) RMSE                      : 2.11457338
1 day(s) R2                        : -0.21984058
1 day(s) Pearson r                 : -0.41616104
1 day(s) QLIKE                     : 0.10155590
3 day(s) MAE                       : 1.69780241
3 day(s) RMSE                      : 2.11245476
3 day(s) R2                        : -0.21688530
3 day(s) Pearson r                 : -0.41118193
3 day(s) QLIKE                     : 0.10145364
5 day(s) MAE                       : 1.69802696
5 day(s) RMSE                      : 2.11329518
5 day(s) R2                        : -0.21741100
5 day(s) Pearson r                 : -0.40822839
5 day(s) QLIKE                     : 0.10168460
10 day(s) MAE                      : 1.70049947
10 day(s) RMSE                     : 2.12017276
10 day(s) R2                       : -0.22425648
10 day(s) Pearson r                : -0.40632454
10 day(s) QLIKE                    : 0.10285897
full horizon MAE                   : 1.70049947
full horizon RMSE                  : 2.12017276
full horizon R2                    : -0.22425648
full horizon Pearson r             : -0.40632454
full horizon QLIKE                 : 0.10285897

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_H10.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.19596, max=16.8004

=== GE | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.08059546680979
  Min value:  -8.14578298619116
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.079343715915981
  Min value:  -68.74402590450526
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.670127332275285
  Min value:  -3.0551258580114347
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 152s 450ms/step - loss: 0.7521 - val_loss: 1.0736 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 238ms/step - loss: 0.3828 - val_loss: 0.7090 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 307ms/step - loss: 0.3246 - val_loss: 0.6760 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 304ms/step - loss: 0.3116 - val_loss: 0.8499 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 303ms/step - loss: 0.2973 - val_loss: 0.8219 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 306ms/step - loss: 0.2942 - val_loss: 0.8486 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 302ms/step - loss: 0.2863 - val_loss: 1.1835 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 298ms/step - loss: 0.2806
Epoch 8: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 308ms/step - loss: 0.2747 - val_loss: 0.6975 - learning_rate: 5.0000e-04
Epoch 8: early stopping
Restoring model weights from the end of the best epoch: 3.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 5.05041420
1 day(s) RMSE                      : 27.43993195
1 day(s) R2                        : -0.00100272
1 day(s) Pearson r                 : -0.04849518
1 day(s) QLIKE                     : 0.68437663
3 day(s) MAE                       : 4.58339733
3 day(s) RMSE                      : 27.49517937
3 day(s) R2                        : -0.00509544
3 day(s) Pearson r                 : -0.00041208
3 day(s) QLIKE                     : 0.69505950
5 day(s) MAE                       : 4.47057793
5 day(s) RMSE                      : 27.51194401
5 day(s) R2                        : -0.00637913
5 day(s) Pearson r                 : -0.00049049
5 day(s) QLIKE                     : 0.69167173
10 day(s) MAE                      : 4.46564283
10 day(s) RMSE                     : 27.51220414
10 day(s) R2                       : -0.00625047
10 day(s) Pearson r                : -0.00027229
10 day(s) QLIKE                    : 0.68389297
full horizon MAE                   : 4.46564283
full horizon RMSE                  : 27.51220414
full horizon R2                    : -0.00625047
full horizon Pearson r             : -0.00027229
full horizon QLIKE                 : 0.68389297

--- Task 2 ---
1 day(s) MAE                       : 0.04987835
1 day(s) RMSE                      : 0.06183439
1 day(s) R2                        : -1.66357355
1 day(s) Pearson r                 : 0.05143913
1 day(s) QLIKE                     : 1.99186828
3 day(s) MAE                       : 0.05259499
3 day(s) RMSE                      : 0.06457532
3 day(s) R2                        : -1.89755267
3 day(s) Pearson r                 : -0.00599315
3 day(s) QLIKE                     : 3.17186284
5 day(s) MAE                       : 0.05356516
5 day(s) RMSE                      : 0.06551159
5 day(s) R2                        : -1.97522677
5 day(s) Pearson r                 : -0.00891282
5 day(s) QLIKE                     : 9.06000497
10 day(s) MAE                      : 0.05483590
10 day(s) RMSE                     : 0.06671752
10 day(s) R2                       : -2.06304611
10 day(s) Pearson r                : -0.01392376
10 day(s) QLIKE                    : 7.10708631
full horizon MAE                   : 0.05483590
full horizon RMSE                  : 0.06671752
full horizon R2                    : -2.06304611
full horizon Pearson r             : -0.01392376
full horizon QLIKE                 : 7.10708631

--- Task 3 ---
1 day(s) MAE                       : 0.75627812
1 day(s) RMSE                      : 0.77803206
1 day(s) R2                        : -17.09713894
1 day(s) Pearson r                 : -0.01507400
1 day(s) QLIKE                     : 0.10599631
3 day(s) MAE                       : 0.75414424
3 day(s) RMSE                      : 0.77588144
3 day(s) R2                        : -17.05195044
3 day(s) Pearson r                 : -0.00294819
3 day(s) QLIKE                     : 0.11993285
5 day(s) MAE                       : 0.75544674
5 day(s) RMSE                      : 0.77710176
5 day(s) R2                        : -17.16531164
5 day(s) Pearson r                 : 0.00481108
5 day(s) QLIKE                     : 1.76013472
10 day(s) MAE                      : 0.75546219
10 day(s) RMSE                     : 0.77694315
10 day(s) R2                       : -17.32010061
10 day(s) Pearson r                : 0.01005592
10 day(s) QLIKE                    : 3.61656013
full horizon MAE                   : 0.75546219
full horizon RMSE                  : 0.77694315
full horizon R2                    : -17.32010061
full horizon Pearson r             : 0.01005592
full horizon QLIKE                 : 3.61656013

--- Task 4 ---
1 day(s) MAE                       : 0.56371423
1 day(s) RMSE                      : 0.98352842
1 day(s) R2                        : -0.14997310
1 day(s) Pearson r                 : -0.01484540
1 day(s) QLIKE                     : 0.61887985
3 day(s) MAE                       : 0.57005062
3 day(s) RMSE                      : 0.99492273
3 day(s) R2                        : -0.17775333
3 day(s) Pearson r                 : -0.00213879
3 day(s) QLIKE                     : 0.62440202
5 day(s) MAE                       : 0.60581183
5 day(s) RMSE                      : 1.04285285
5 day(s) R2                        : -0.29516048
5 day(s) Pearson r                 : -0.00494130
5 day(s) QLIKE                     : 0.71734534
10 day(s) MAE                      : 0.68670954
10 day(s) RMSE                     : 1.11922678
10 day(s) R2                       : -0.49573825
10 day(s) Pearson r                : -0.00964162
10 day(s) QLIKE                    : 0.87395399
full horizon MAE                   : 0.68670954
full horizon RMSE                  : 1.11922678
full horizon R2                    : -0.49573825
full horizon Pearson r             : -0.00964162
full horizon QLIKE                 : 0.87395399

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067575
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
3 day(s) MAE                       : 0.23234204
3 day(s) RMSE                      : 0.23953830
3 day(s) R2                        : -15.89704844
3 day(s) Pearson r                 : -0.00083126
3 day(s) QLIKE                     : 0.02992349
5 day(s) MAE                       : 0.23243579
5 day(s) RMSE                      : 0.23962873
5 day(s) R2                        : -15.91100721
5 day(s) Pearson r                 : -0.00180576
5 day(s) QLIKE                     : 0.02991979
10 day(s) MAE                      : 0.23282017
10 day(s) RMSE                     : 0.24002700
10 day(s) R2                       : -15.90655727
10 day(s) Pearson r                : -0.00642982
10 day(s) QLIKE                    : 0.03001085
full horizon MAE                   : 0.23282017
full horizon RMSE                  : 0.24002700
full horizon R2                    : -15.90655727
full horizon Pearson r             : -0.00642982
full horizon QLIKE                 : 0.03001085

--- Task 6 ---
1 day(s) MAE                       : 1.47661484
1 day(s) RMSE                      : 1.92056120
1 day(s) R2                        : -0.00626845
1 day(s) Pearson r                 : 0.10890396
1 day(s) QLIKE                     : 0.08064654
3 day(s) MAE                       : 1.41370734
3 day(s) RMSE                      : 1.99573257
3 day(s) R2                        : -0.08612423
3 day(s) Pearson r                 : 0.00279820
3 day(s) QLIKE                     : 0.08297267
5 day(s) MAE                       : 1.41844046
5 day(s) RMSE                      : 2.07746592
5 day(s) R2                        : -0.17648043
5 day(s) Pearson r                 : 0.00489015
5 day(s) QLIKE                     : 0.08531626
10 day(s) MAE                      : 1.46650282
10 day(s) RMSE                     : 2.22526901
10 day(s) R2                       : -0.34863663
10 day(s) Pearson r                : 0.01021813
10 day(s) QLIKE                    : 0.08859203
full horizon MAE                   : 1.46650282
full horizon RMSE                  : 2.22526901
full horizon R2                    : -0.34863663
full horizon Pearson r             : 0.01021813
full horizon QLIKE                 : 0.08859203

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=3.38414, max=5.05276

=== GE | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.120224031597433
  Min value:  -8.143184816833994
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.084344850046558
  Min value:  -68.75078734214202
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.687942200751508
  Min value:  -3.0083215695863115
Epoch 001 | phase=1 | train_loss=5.0755 | val_main=0.939962
Epoch 002 | phase=1 | train_loss=4.9649 | val_main=0.939907
Epoch 003 | phase=1 | train_loss=4.6755 | val_main=0.939745
Epoch 004 | phase=1 | train_loss=4.5695 | val_main=0.939803
Epoch 005 | phase=1 | train_loss=4.6302 | val_main=0.939699
Epoch 006 | phase=1 | train_loss=4.5988 | val_main=0.939801
Epoch 007 | phase=1 | train_loss=4.5910 | val_main=0.939785
Epoch 008 | phase=1 | train_loss=4.6247 | val_main=0.939737
Epoch 009 | phase=1 | train_loss=4.5317 | val_main=0.939775
Epoch 010 | phase=1 | train_loss=4.5970 | val_main=0.939731
Epoch 011 | phase=1 | train_loss=4.5370 | val_main=0.939755
Epoch 012 | phase=1 | train_loss=4.5334 | val_main=0.939745
Epoch 013 | phase=1 | train_loss=4.5314 | val_main=0.939720
Epoch 014 | phase=1 | train_loss=4.5718 | val_main=0.939804
Epoch 015 | phase=1 | train_loss=4.4952 | val_main=0.939833
Epoch 016 | phase=0 | train_loss=5.5749 | val_main=0.936646
Epoch 017 | phase=0 | train_loss=5.5701 | val_main=0.956292
Epoch 018 | phase=0 | train_loss=5.5399 | val_main=0.989979
Epoch 019 | phase=0 | train_loss=5.5788 | val_main=0.972654
Epoch 020 | phase=0 | train_loss=5.5930 | val_main=0.967430
Epoch 021 | phase=0 | train_loss=5.6011 | val_main=0.979676
Epoch 022 | phase=0 | train_loss=5.5306 | val_main=0.985558
Epoch 023 | phase=0 | train_loss=5.5803 | val_main=0.985358
Epoch 024 | phase=0 | train_loss=5.5319 | val_main=0.995213
Epoch 025 | phase=0 | train_loss=5.4708 | val_main=0.560757
Epoch 026 | phase=0 | train_loss=5.3457 | val_main=1.027384
Epoch 027 | phase=0 | train_loss=5.2485 | val_main=0.630241
Epoch 028 | phase=0 | train_loss=5.1040 | val_main=0.681628
Epoch 029 | phase=0 | train_loss=4.9860 | val_main=0.580018
Epoch 030 | phase=0 | train_loss=4.8271 | val_main=0.528220
Epoch 031 | phase=2 | train_loss=0.7971 | val_main=0.627772
Epoch 032 | phase=2 | train_loss=0.7477 | val_main=0.609650
Epoch 033 | phase=2 | train_loss=0.7235 | val_main=0.555301
Epoch 034 | phase=2 | train_loss=0.6994 | val_main=0.624855
Epoch 035 | phase=2 | train_loss=0.6800 | val_main=0.615603
Epoch 036 | phase=2 | train_loss=0.6684 | val_main=0.569577
Epoch 037 | phase=2 | train_loss=0.6766 | val_main=0.595442
Epoch 038 | phase=2 | train_loss=0.6605 | val_main=0.616834
Epoch 039 | phase=2 | train_loss=0.6520 | val_main=0.561540
Epoch 040 | phase=2 | train_loss=0.6491 | val_main=0.607833
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.46386438
1 day(s) RMSE                      : 27.72736170
1 day(s) R2                        : -0.02208330
1 day(s) Pearson r                 : 0.01269703
1 day(s) QLIKE                     : 0.76380712
3 day(s) MAE                       : 4.42918499
3 day(s) RMSE                      : 27.72686957
3 day(s) R2                        : -0.02210584
3 day(s) Pearson r                 : 0.00371191
3 day(s) QLIKE                     : 0.76712094
5 day(s) MAE                       : 4.42595674
5 day(s) RMSE                      : 27.73136617
5 day(s) R2                        : -0.02249594
5 day(s) Pearson r                 : -0.00346349
5 day(s) QLIKE                     : 0.77220001
10 day(s) MAE                      : 4.45682796
10 day(s) RMSE                     : 27.73724845
10 day(s) R2                       : -0.02277965
10 day(s) Pearson r                : -0.01061715
10 day(s) QLIKE                    : 0.77858966
20 day(s) MAE                      : 4.49279193
20 day(s) RMSE                     : 27.74169292
20 day(s) R2                       : -0.02305331
20 day(s) Pearson r                : -0.02126558
20 day(s) QLIKE                    : 0.78356202
full horizon MAE                   : 4.49279193
full horizon RMSE                  : 27.74169292
full horizon R2                    : -0.02305331
full horizon Pearson r             : -0.02126558
full horizon QLIKE                 : 0.78356202

--- Task 2 ---
1 day(s) MAE                       : 0.09036602
1 day(s) RMSE                      : 0.11157310
1 day(s) R2                        : -7.67208472
1 day(s) Pearson r                 : -0.17976365
1 day(s) QLIKE                     : 3.48215153
3 day(s) MAE                       : 0.07943692
3 day(s) RMSE                      : 0.09844805
3 day(s) R2                        : -5.73461127
3 day(s) Pearson r                 : -0.12591352
3 day(s) QLIKE                     : 3.44228108
5 day(s) MAE                       : 0.07539323
5 day(s) RMSE                      : 0.09338653
5 day(s) R2                        : -5.04577912
5 day(s) Pearson r                 : -0.11822258
5 day(s) QLIKE                     : 3.37180130
10 day(s) MAE                      : 0.06939412
10 day(s) RMSE                     : 0.08642735
10 day(s) R2                       : -4.14015366
10 day(s) Pearson r                : -0.10445153
10 day(s) QLIKE                    : 3.25848366
20 day(s) MAE                      : 0.06294539
20 day(s) RMSE                     : 0.07966988
20 day(s) R2                       : -3.31558704
20 day(s) Pearson r                : -0.09020908
20 day(s) QLIKE                    : 3.00967463
full horizon MAE                   : 0.06294539
full horizon RMSE                  : 0.07966988
full horizon R2                    : -3.31558704
full horizon Pearson r             : -0.09020908
full horizon QLIKE                 : 3.00967463

--- Task 3 ---
1 day(s) MAE                       : 0.75894122
1 day(s) RMSE                      : 0.78055904
1 day(s) R2                        : -17.21488577
1 day(s) Pearson r                 : 0.14825552
1 day(s) QLIKE                     : 0.31420597
3 day(s) MAE                       : 0.75807803
3 day(s) RMSE                      : 0.77966139
3 day(s) R2                        : -17.22827082
3 day(s) Pearson r                 : 0.13872197
3 day(s) QLIKE                     : 0.30860630
5 day(s) MAE                       : 0.75726564
5 day(s) RMSE                      : 0.77880504
5 day(s) R2                        : -17.24502948
5 day(s) Pearson r                 : 0.13957058
5 day(s) QLIKE                     : 0.30468425
10 day(s) MAE                      : 0.75538832
10 day(s) RMSE                     : 0.77679596
10 day(s) R2                       : -17.31315986
10 day(s) Pearson r                : 0.13312134
10 day(s) QLIKE                    : 0.31418188
20 day(s) MAE                      : 0.75154994
20 day(s) RMSE                     : 0.77267058
20 day(s) R2                       : -17.46593661
20 day(s) Pearson r                : 0.12481308
20 day(s) QLIKE                    : 0.31876779
full horizon MAE                   : 0.75154994
full horizon RMSE                  : 0.77267058
full horizon R2                    : -17.46593661
full horizon Pearson r             : 0.12481308
full horizon QLIKE                 : 0.31876779

--- Task 4 ---
1 day(s) MAE                       : 0.56446928
1 day(s) RMSE                      : 0.99771069
1 day(s) R2                        : -0.18337694
1 day(s) Pearson r                 : 0.06088134
1 day(s) QLIKE                     : 0.61807555
3 day(s) MAE                       : 0.56584289
3 day(s) RMSE                      : 0.99805735
3 day(s) R2                        : -0.18518632
3 day(s) Pearson r                 : 0.05758842
3 day(s) QLIKE                     : 0.61073857
5 day(s) MAE                       : 0.56721899
5 day(s) RMSE                      : 0.99886209
5 day(s) R2                        : -0.18819735
5 day(s) Pearson r                 : 0.06287411
5 day(s) QLIKE                     : 0.60402805
10 day(s) MAE                      : 0.57051066
10 day(s) RMSE                     : 1.00076803
10 day(s) R2                       : -0.19587629
10 day(s) Pearson r                : 0.07028606
10 day(s) QLIKE                    : 0.58260911
20 day(s) MAE                      : 0.57658227
20 day(s) RMSE                     : 1.00319141
20 day(s) R2                       : -0.20887524
20 day(s) Pearson r                : 0.07462472
20 day(s) QLIKE                    : 0.54254482
full horizon MAE                   : 0.57658227
full horizon RMSE                  : 1.00319141
full horizon R2                    : -0.20887524
full horizon Pearson r             : 0.07462472
full horizon QLIKE                 : 0.54254482

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067576
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
3 day(s) MAE                       : 0.23234204
3 day(s) RMSE                      : 0.23953830
3 day(s) R2                        : -15.89704844
3 day(s) Pearson r                 : -0.01662874
3 day(s) QLIKE                     : 0.02992349
5 day(s) MAE                       : 0.23243579
5 day(s) RMSE                      : 0.23962873
5 day(s) R2                        : -15.91100721
5 day(s) Pearson r                 : -0.01824617
5 day(s) QLIKE                     : 0.02991979
10 day(s) MAE                      : 0.23282017
10 day(s) RMSE                     : 0.24002700
10 day(s) R2                       : -15.90655724
10 day(s) Pearson r                : -0.01260510
10 day(s) QLIKE                    : 0.03001085
20 day(s) MAE                      : 0.23367059
20 day(s) RMSE                     : 0.24090050
20 day(s) R2                       : -15.91381390
20 day(s) Pearson r                : 0.02276025
20 day(s) QLIKE                    : 0.03017665
full horizon MAE                   : 0.23367059
full horizon RMSE                  : 0.24090050
full horizon R2                    : -15.91381390
full horizon Pearson r             : 0.02276025
full horizon QLIKE                 : 0.03017665

--- Task 6 ---
1 day(s) MAE                       : 1.48433475
1 day(s) RMSE                      : 2.31693234
1 day(s) R2                        : -0.46448273
1 day(s) Pearson r                 : -0.21205263
1 day(s) QLIKE                     : 0.09742487
3 day(s) MAE                       : 1.47858894
3 day(s) RMSE                      : 2.31481917
3 day(s) R2                        : -0.46119760
3 day(s) Pearson r                 : -0.20553650
3 day(s) QLIKE                     : 0.09721400
5 day(s) MAE                       : 1.47795250
5 day(s) RMSE                      : 2.32336384
5 day(s) R2                        : -0.47146977
5 day(s) Pearson r                 : -0.20734651
5 day(s) QLIKE                     : 0.09789629
10 day(s) MAE                      : 1.47010313
10 day(s) RMSE                     : 2.32876546
10 day(s) R2                       : -0.47700312
10 day(s) Pearson r                : -0.20126904
10 day(s) QLIKE                    : 0.09848692
20 day(s) MAE                      : 1.45007564
20 day(s) RMSE                     : 2.32277178
20 day(s) R2                       : -0.46916804
20 day(s) Pearson r                : -0.19246170
20 day(s) QLIKE                    : 0.09901890
full horizon MAE                   : 1.45007564
full horizon RMSE                  : 2.32277178
full horizon R2                    : -0.46916804
full horizon Pearson r             : -0.19246170
full horizon QLIKE                 : 0.09901890

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_H20.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=1.12347, max=7.96721

=== GE | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  38.10128934238557
  Min value:  -0.033788504883313815
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.120224031597433
  Min value:  -8.143184816833994
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4221295497976101
  Min value:  -0.03368162544886536
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.084344850046558
  Min value:  -68.75078734214202
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  0.4422974929765818
  Min value:  -0.03368162544886536
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.687942200751508
  Min value:  -3.0083215695863115
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 120s 512ms/step - loss: 0.7411 - val_loss: 0.9876 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 318ms/step - loss: 0.3435 - val_loss: 1.0415 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 448ms/step - loss: 0.2819 - val_loss: 0.9291 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 448ms/step - loss: 0.2595 - val_loss: 0.7508 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 448ms/step - loss: 0.2480 - val_loss: 1.1599 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 447ms/step - loss: 0.2364 - val_loss: 0.6969 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 450ms/step - loss: 0.2232 - val_loss: 0.6964 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 20s 445ms/step - loss: 0.2151 - val_loss: 1.3415 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 450ms/step - loss: 0.2117 - val_loss: 0.5845 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 449ms/step - loss: 0.2281 - val_loss: 1.2592 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 20s 444ms/step - loss: 0.2061 - val_loss: 0.5329 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 448ms/step - loss: 0.1961 - val_loss: 0.5605 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 449ms/step - loss: 0.1883 - val_loss: 0.6122 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 465ms/step - loss: 0.1879 - val_loss: 0.6427 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 20s 445ms/step - loss: 0.1856 - val_loss: 0.6187 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=16 tf_ratio=0.211 -> TF=ON
Epoch 16/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 437ms/step - loss: 0.1891
Epoch 16: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 451ms/step - loss: 0.1876 - val_loss: 1.1144 - learning_rate: 5.0000e-04
Epoch 16: early stopping
Restoring model weights from the end of the best epoch: 11.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 5.19848697
1 day(s) RMSE                      : 27.43351981
1 day(s) R2                        : -0.00053495
1 day(s) Pearson r                 : -0.05882689
1 day(s) QLIKE                     : 0.68447001
3 day(s) MAE                       : 4.53457541
3 day(s) RMSE                      : 27.53339268
3 day(s) R2                        : -0.00789119
3 day(s) Pearson r                 : -0.00034660
3 day(s) QLIKE                     : 0.71691761
5 day(s) MAE                       : 4.49956069
5 day(s) RMSE                      : 27.52036699
5 day(s) R2                        : -0.00699544
5 day(s) Pearson r                 : -0.00021765
5 day(s) QLIKE                     : 0.70348857
10 day(s) MAE                      : 4.39247452
10 day(s) RMSE                     : 27.54831759
10 day(s) R2                       : -0.00889388
10 day(s) Pearson r                : -0.00090567
10 day(s) QLIKE                    : 0.69463188
20 day(s) MAE                      : 4.31833896
20 day(s) RMSE                     : 27.57918393
20 day(s) R2                       : -0.01110246
20 day(s) Pearson r                : -0.00219196
20 day(s) QLIKE                    : 0.68773371
full horizon MAE                   : 4.31833896
full horizon RMSE                  : 27.57918393
full horizon R2                    : -0.01110246
full horizon Pearson r             : -0.00219196
full horizon QLIKE                 : 0.68773371

--- Task 2 ---
1 day(s) MAE                       : 0.05080419
1 day(s) RMSE                      : 0.06277969
1 day(s) R2                        : -1.74563534
1 day(s) Pearson r                 : -0.04271951
1 day(s) QLIKE                     : 1.99194715
3 day(s) MAE                       : 0.05320894
3 day(s) RMSE                      : 0.06517598
3 day(s) R2                        : -1.95170810
3 day(s) Pearson r                 : -0.00565711
3 day(s) QLIKE                     : 8.04835129
5 day(s) MAE                       : 0.05393353
5 day(s) RMSE                      : 0.06586753
5 day(s) R2                        : -2.00764465
5 day(s) Pearson r                 : -0.00794331
5 day(s) QLIKE                     : 7.26690474
10 day(s) MAE                      : 0.05502009
10 day(s) RMSE                     : 0.06689252
10 day(s) R2                       : -2.07913568
10 day(s) Pearson r                : -0.01203480
10 day(s) QLIKE                    : 5.19338633
20 day(s) MAE                      : 0.05670625
20 day(s) RMSE                     : 0.06843971
20 day(s) R2                       : -2.18469501
20 day(s) Pearson r                : -0.01744371
20 day(s) QLIKE                    : 3.30555307
full horizon MAE                   : 0.05670625
full horizon RMSE                  : 0.06843971
full horizon R2                    : -2.18469501
full horizon Pearson r             : -0.01744371
full horizon QLIKE                 : 3.30555307

--- Task 3 ---
1 day(s) MAE                       : 0.75621302
1 day(s) RMSE                      : 0.77796814
1 day(s) R2                        : -17.09416557
1 day(s) Pearson r                 : 0.01542727
1 day(s) QLIKE                     : 0.10599362
3 day(s) MAE                       : 0.75858821
3 day(s) RMSE                      : 0.78024485
3 day(s) R2                        : -17.25556292
3 day(s) Pearson r                 : 0.00324093
3 day(s) QLIKE                     : 2.81606161
5 day(s) MAE                       : 0.75841297
5 day(s) RMSE                      : 0.78001293
5 day(s) R2                        : -17.30166768
5 day(s) Pearson r                 : 0.00453717
5 day(s) QLIKE                     : 1.75185913
10 day(s) MAE                      : 0.75694531
10 day(s) RMSE                     : 0.77840039
10 day(s) R2                       : -17.38888767
10 day(s) Pearson r                : 0.00661807
10 day(s) QLIKE                    : 0.93461318
20 day(s) MAE                      : 0.75331976
20 day(s) RMSE                     : 0.77447749
20 day(s) R2                       : -17.55240408
20 day(s) Pearson r                : 0.00953152
20 day(s) QLIKE                    : 0.52083293
full horizon MAE                   : 0.75331976
full horizon RMSE                  : 0.77447749
full horizon R2                    : -17.55240408
full horizon Pearson r             : 0.00953152
full horizon QLIKE                 : 0.52083293

--- Task 4 ---
1 day(s) MAE                       : 0.56543870
1 day(s) RMSE                      : 0.97057041
1 day(s) R2                        : -0.11987087
1 day(s) Pearson r                 : -0.02661751
1 day(s) QLIKE                     : 0.61888085
3 day(s) MAE                       : 0.58044323
3 day(s) RMSE                      : 0.95392514
3 day(s) R2                        : -0.08269024
3 day(s) Pearson r                 : 0.00166384
3 day(s) QLIKE                     : 0.61519239
5 day(s) MAE                       : 0.58990335
5 day(s) RMSE                      : 0.94718124
5 day(s) R2                        : -0.06842413
5 day(s) Pearson r                 : 0.00413751
5 day(s) QLIKE                     : 0.60964596
10 day(s) MAE                      : 0.58582276
10 day(s) RMSE                     : 0.96120324
10 day(s) R2                       : -0.10318885
10 day(s) Pearson r                : -0.00643573
10 day(s) QLIKE                    : 0.59644222
20 day(s) MAE                      : 0.59895996
20 day(s) RMSE                     : 1.01103438
20 day(s) R2                       : -0.22785115
20 day(s) Pearson r                : -0.01560695
20 day(s) QLIKE                    : 0.59253924
full horizon MAE                   : 0.59895996
full horizon RMSE                  : 1.01103438
full horizon R2                    : -0.22785115
full horizon Pearson r             : -0.01560695
full horizon QLIKE                 : 0.59253924

--- Task 5 ---
1 day(s) MAE                       : 0.23230417
1 day(s) RMSE                      : 0.23950211
1 day(s) R2                        : -15.89067576
1 day(s) Pearson r                 : nan
1 day(s) QLIKE                     : 0.02992601
3 day(s) MAE                       : 0.23234204
3 day(s) RMSE                      : 0.23953830
3 day(s) R2                        : -15.89704847
3 day(s) Pearson r                 : -0.00058495
3 day(s) QLIKE                     : 0.02992349
5 day(s) MAE                       : 0.23243579
5 day(s) RMSE                      : 0.23962873
5 day(s) R2                        : -15.91100724
5 day(s) Pearson r                 : -0.00142483
5 day(s) QLIKE                     : 0.02991979
10 day(s) MAE                      : 0.23282017
10 day(s) RMSE                     : 0.24002700
10 day(s) R2                       : -15.90655728
10 day(s) Pearson r                : -0.00366818
10 day(s) QLIKE                    : 0.03001085
20 day(s) MAE                      : 0.23367059
20 day(s) RMSE                     : 0.24090050
20 day(s) R2                       : -15.91381394
20 day(s) Pearson r                : -0.00660574
20 day(s) QLIKE                    : 0.03017665
full horizon MAE                   : 0.23367059
full horizon RMSE                  : 0.24090050
full horizon R2                    : -15.91381394
full horizon Pearson r             : -0.00660574
full horizon QLIKE                 : 0.03017665

--- Task 6 ---
1 day(s) MAE                       : 1.50310610
1 day(s) RMSE                      : 1.91623454
1 day(s) R2                        : -0.00173968
1 day(s) Pearson r                 : 0.09998421
1 day(s) QLIKE                     : 0.08064532
3 day(s) MAE                       : 1.43997722
3 day(s) RMSE                      : 1.93631179
3 day(s) R2                        : -0.02241071
3 day(s) Pearson r                 : 0.00235037
3 day(s) QLIKE                     : 0.08120932
5 day(s) MAE                       : 1.45198560
5 day(s) RMSE                      : 1.93055630
5 day(s) R2                        : -0.01597223
5 day(s) Pearson r                 : -0.00044356
5 day(s) QLIKE                     : 0.08123932
10 day(s) MAE                      : 1.42779203
10 day(s) RMSE                     : 1.93966475
10 day(s) R2                       : -0.02466820
10 day(s) Pearson r                : 0.00721687
10 day(s) QLIKE                    : 0.08165337
20 day(s) MAE                      : 1.38456770
20 day(s) RMSE                     : 1.98541610
20 day(s) R2                       : -0.07339969
20 day(s) Pearson r                : 0.02086458
20 day(s) QLIKE                    : 0.08324364
full horizon MAE                   : 1.38456770
full horizon RMSE                  : 1.98541610
full horizon R2                    : -0.07339969
full horizon Pearson r             : 0.02086458
full horizon QLIKE                 : 0.08324364

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GE/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.591065, max=527.531
Saved y_pred min=2.48686, max=5.2922

=== BAC | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.107742786547419
  Min value:  -11.49125811580691
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2542888447212457
  Min value:  -1.9794140488614782
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.207108669727662
  Min value:  -11.49125811580691
Epoch 001 | phase=1 | train_loss=4.9526 | val_main=0.561224
Epoch 002 | phase=1 | train_loss=4.8670 | val_main=0.561167
Epoch 003 | phase=1 | train_loss=5.0016 | val_main=0.561091
Epoch 004 | phase=1 | train_loss=4.6199 | val_main=0.561106
Epoch 005 | phase=1 | train_loss=4.4244 | val_main=0.561048
Epoch 006 | phase=1 | train_loss=4.5209 | val_main=0.561019
Epoch 007 | phase=1 | train_loss=4.3559 | val_main=0.561027
Epoch 008 | phase=1 | train_loss=4.1812 | val_main=0.560951
Epoch 009 | phase=1 | train_loss=4.4389 | val_main=0.560660
Epoch 010 | phase=1 | train_loss=4.0400 | val_main=0.560935
Epoch 011 | phase=1 | train_loss=3.9436 | val_main=0.560706
Epoch 012 | phase=1 | train_loss=3.9575 | val_main=0.560858
Epoch 013 | phase=1 | train_loss=3.8274 | val_main=0.560862
Epoch 014 | phase=1 | train_loss=3.7241 | val_main=0.560823
Epoch 015 | phase=1 | train_loss=3.5603 | val_main=0.560818
Epoch 016 | phase=0 | train_loss=4.4668 | val_main=0.530514
Epoch 017 | phase=0 | train_loss=4.2924 | val_main=0.472941
Epoch 018 | phase=0 | train_loss=4.1999 | val_main=0.483452
Epoch 019 | phase=0 | train_loss=3.7790 | val_main=0.478276
Epoch 020 | phase=0 | train_loss=3.7421 | val_main=0.469554
Epoch 021 | phase=0 | train_loss=3.3840 | val_main=0.501593
Epoch 022 | phase=0 | train_loss=3.4689 | val_main=0.493420
Epoch 023 | phase=0 | train_loss=3.4739 | val_main=0.482293
Epoch 024 | phase=0 | train_loss=3.0567 | val_main=0.500483
Epoch 025 | phase=0 | train_loss=3.1525 | val_main=0.506236
Epoch 026 | phase=0 | train_loss=3.0683 | val_main=0.490539
Epoch 027 | phase=0 | train_loss=2.9381 | val_main=0.533375
Epoch 028 | phase=0 | train_loss=3.3865 | val_main=0.477815
Epoch 029 | phase=0 | train_loss=2.7216 | val_main=0.511413
Epoch 030 | phase=0 | train_loss=2.8267 | val_main=0.518688
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.21876415
1 day(s) RMSE                      : 7.40398113
1 day(s) R2                        : 0.09028098
1 day(s) Pearson r                 : 0.36025642
1 day(s) QLIKE                     : 0.33964769
full horizon MAE                   : 2.21876415
full horizon RMSE                  : 7.40398113
full horizon R2                    : 0.09028098
full horizon Pearson r             : 0.36025642
full horizon QLIKE                 : 0.33964769

--- Task 2 ---
1 day(s) MAE                       : 0.13082751
1 day(s) RMSE                      : 0.16365420
1 day(s) R2                        : -14.24392601
1 day(s) Pearson r                 : -0.04182469
1 day(s) QLIKE                     : 11.22750171
full horizon MAE                   : 0.13082751
full horizon RMSE                  : 0.16365420
full horizon R2                    : -14.24392601
full horizon Pearson r             : -0.04182469
full horizon QLIKE                 : 11.22750171

--- Task 3 ---
1 day(s) MAE                       : 0.32589114
1 day(s) RMSE                      : 0.38367963
1 day(s) R2                        : -0.16284002
1 day(s) Pearson r                 : 0.14608751
1 day(s) QLIKE                     : 6.86966061
full horizon MAE                   : 0.32589114
full horizon RMSE                  : 0.38367963
full horizon R2                    : -0.16284002
full horizon Pearson r             : 0.14608751
full horizon QLIKE                 : 6.86966061

--- Task 4 ---
1 day(s) MAE                       : 0.82547524
1 day(s) RMSE                      : 0.97408619
1 day(s) R2                        : -0.78516268
1 day(s) Pearson r                 : 0.03934078
1 day(s) QLIKE                     : 0.71555974
full horizon MAE                   : 0.82547524
full horizon RMSE                  : 0.97408619
full horizon R2                    : -0.78516268
full horizon Pearson r             : 0.03934078
full horizon QLIKE                 : 0.71555974

--- Task 5 ---
1 day(s) MAE                       : 0.03735234
1 day(s) RMSE                      : 0.05129185
1 day(s) R2                        : -0.56887407
1 day(s) Pearson r                 : 0.48563714
1 day(s) QLIKE                     : 6.19513565
full horizon MAE                   : 0.03735234
full horizon RMSE                  : 0.05129185
full horizon R2                    : -0.56887407
full horizon Pearson r             : 0.48563714
full horizon QLIKE                 : 6.19513565

--- Task 6 ---
1 day(s) MAE                       : 1.75708952
1 day(s) RMSE                      : 3.71954191
1 day(s) R2                        : -12.00419343
1 day(s) Pearson r                 : -0.26617078
1 day(s) QLIKE                     : 0.06822721
full horizon MAE                   : 1.75708952
full horizon RMSE                  : 3.71954191
full horizon R2                    : -12.00419343
full horizon Pearson r             : -0.26617078
full horizon QLIKE                 : 0.06822721

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_H1.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.67265, max=15.1327

=== BAC | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.107742786547419
  Min value:  -11.49125811580691
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2542888447212457
  Min value:  -1.9794140488614782
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.207108669727662
  Min value:  -11.49125811580691
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 131s 456ms/step - loss: 1.0130 - val_loss: 0.6371 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 8s 171ms/step - loss: 0.9819 - val_loss: 0.5580 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 188ms/step - loss: 0.9454 - val_loss: 0.4752 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 197ms/step - loss: 0.9255 - val_loss: 0.4638 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 195ms/step - loss: 0.9214 - val_loss: 0.4710 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 189ms/step - loss: 0.9181 - val_loss: 0.4994 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 189ms/step - loss: 0.9095 - val_loss: 0.4556 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 188ms/step - loss: 0.9097 - val_loss: 0.4333 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 187ms/step - loss: 0.8930 - val_loss: 0.4250 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 191ms/step - loss: 0.8843 - val_loss: 0.4209 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 213ms/step - loss: 0.8859 - val_loss: 0.4250 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 192ms/step - loss: 0.8883 - val_loss: 0.4193 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 189ms/step - loss: 0.8898 - val_loss: 0.4159 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 188ms/step - loss: 0.8884 - val_loss: 0.4039 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 224ms/step - loss: 0.8811 - val_loss: 0.3900 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=16 tf_ratio=0.211 -> TF=ON
Epoch 16/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 190ms/step - loss: 0.8761 - val_loss: 0.3932 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=17 tf_ratio=0.158 -> TF=ON
Epoch 17/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 190ms/step - loss: 0.8735 - val_loss: 0.3929 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=18 tf_ratio=0.105 -> TF=ON
Epoch 18/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 189ms/step - loss: 0.8725 - val_loss: 0.4055 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=19 tf_ratio=0.053 -> TF=ON
Epoch 19/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 188ms/step - loss: 0.8703 - val_loss: 0.3816 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=20 tf_ratio=0.000 -> TF=OFF
Epoch 20/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 189ms/step - loss: 0.8723 - val_loss: 0.3862 - learning_rate: 5.0000e-04
Restoring model weights from the end of the best epoch: 19.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 1.98465755
1 day(s) RMSE                      : 7.11367405
1 day(s) R2                        : 0.16022180
1 day(s) Pearson r                 : 0.43776217
1 day(s) QLIKE                     : 0.27296818
full horizon MAE                   : 1.98465755
full horizon RMSE                  : 7.11367405
full horizon R2                    : 0.16022180
full horizon Pearson r             : 0.43776217
full horizon QLIKE                 : 0.27296818

--- Task 2 ---
1 day(s) MAE                       : 0.07046811
1 day(s) RMSE                      : 0.08164147
1 day(s) R2                        : -2.79371040
1 day(s) Pearson r                 : 0.10880732
1 day(s) QLIKE                     : 2.85652642
full horizon MAE                   : 0.07046811
full horizon RMSE                  : 0.08164147
full horizon R2                    : -2.79371040
full horizon Pearson r             : 0.10880732
full horizon QLIKE                 : 2.85652642

--- Task 3 ---
1 day(s) MAE                       : 0.31458458
1 day(s) RMSE                      : 0.35822679
1 day(s) R2                        : -0.01367471
1 day(s) Pearson r                 : 0.11727377
1 day(s) QLIKE                     : 6.83185088
full horizon MAE                   : 0.31458458
full horizon RMSE                  : 0.35822679
full horizon R2                    : -0.01367471
full horizon Pearson r             : 0.11727377
full horizon QLIKE                 : 6.83185088

--- Task 4 ---
1 day(s) MAE                       : 0.96626179
1 day(s) RMSE                      : 1.19557676
1 day(s) R2                        : -1.68929194
1 day(s) Pearson r                 : -0.09753670
1 day(s) QLIKE                     : 0.47578254
full horizon MAE                   : 0.96626179
full horizon RMSE                  : 1.19557676
full horizon R2                    : -1.68929194
full horizon Pearson r             : -0.09753670
full horizon QLIKE                 : 0.47578254

--- Task 5 ---
1 day(s) MAE                       : 0.05084285
1 day(s) RMSE                      : 0.06448893
1 day(s) R2                        : -1.48005719
1 day(s) Pearson r                 : -0.00260489
1 day(s) QLIKE                     : 8.42518401
full horizon MAE                   : 0.05084285
full horizon RMSE                  : 0.06448893
full horizon R2                    : -1.48005719
full horizon Pearson r             : -0.00260489
full horizon QLIKE                 : 8.42518401

--- Task 6 ---
1 day(s) MAE                       : 1.48002148
1 day(s) RMSE                      : 1.98035446
1 day(s) R2                        : -2.68629739
1 day(s) Pearson r                 : -0.12405966
1 day(s) QLIKE                     : 0.03828055
full horizon MAE                   : 1.48002148
full horizon RMSE                  : 1.98035446
full horizon R2                    : -2.68629739
full horizon Pearson r             : -0.12405966
full horizon QLIKE                 : 0.03828055

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.57626, max=25.6446

=== BAC | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.106232115580254
  Min value:  -11.491304399558798
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.253588840112363
  Min value:  -1.9710462778525513
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.205991919409481
  Min value:  -11.491304399558798
Epoch 001 | phase=1 | train_loss=4.9608 | val_main=0.545846
Epoch 002 | phase=1 | train_loss=4.8887 | val_main=0.545850
Epoch 003 | phase=1 | train_loss=4.9592 | val_main=0.545835
Epoch 004 | phase=1 | train_loss=4.7590 | val_main=0.545765
Epoch 005 | phase=1 | train_loss=4.4544 | val_main=0.545720
Epoch 006 | phase=1 | train_loss=4.5743 | val_main=0.545616
Epoch 007 | phase=1 | train_loss=4.4008 | val_main=0.545554
Epoch 008 | phase=1 | train_loss=4.2387 | val_main=0.545521
Epoch 009 | phase=1 | train_loss=4.2967 | val_main=0.545559
Epoch 010 | phase=1 | train_loss=4.0212 | val_main=0.545552
Epoch 011 | phase=1 | train_loss=3.9427 | val_main=0.545646
Epoch 012 | phase=1 | train_loss=4.0139 | val_main=0.545717
Epoch 013 | phase=1 | train_loss=3.8533 | val_main=0.545851
Epoch 014 | phase=1 | train_loss=3.7618 | val_main=0.545951
Epoch 015 | phase=1 | train_loss=3.6038 | val_main=0.546010
Epoch 016 | phase=0 | train_loss=4.5601 | val_main=0.535201
Epoch 017 | phase=0 | train_loss=4.3909 | val_main=0.505597
Epoch 018 | phase=0 | train_loss=4.1172 | val_main=0.504342
Epoch 019 | phase=0 | train_loss=3.8271 | val_main=0.527927
Epoch 020 | phase=0 | train_loss=3.6379 | val_main=0.497503
Epoch 021 | phase=0 | train_loss=3.5965 | val_main=0.525054
Epoch 022 | phase=0 | train_loss=3.4969 | val_main=0.528197
Epoch 023 | phase=0 | train_loss=3.5554 | val_main=0.523914
Epoch 024 | phase=0 | train_loss=3.2221 | val_main=0.527437
Epoch 025 | phase=0 | train_loss=3.2021 | val_main=0.532239
Epoch 026 | phase=0 | train_loss=3.2224 | val_main=0.519378
Epoch 027 | phase=0 | train_loss=3.1668 | val_main=0.529019
Epoch 028 | phase=0 | train_loss=3.3193 | val_main=0.515560
Epoch 029 | phase=0 | train_loss=2.9513 | val_main=0.522586
Epoch 030 | phase=0 | train_loss=2.9092 | val_main=0.522638
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.18270130
1 day(s) RMSE                      : 7.44395402
1 day(s) R2                        : 0.08043161
1 day(s) Pearson r                 : 0.36082149
1 day(s) QLIKE                     : 0.35228798
3 day(s) MAE                       : 2.25918357
3 day(s) RMSE                      : 7.60902325
3 day(s) R2                        : 0.03981017
3 day(s) Pearson r                 : 0.24484494
3 day(s) QLIKE                     : 0.39561364
5 day(s) MAE                       : 2.31575953
5 day(s) RMSE                      : 7.72861629
5 day(s) R2                        : 0.00969592
5 day(s) Pearson r                 : 0.16282191
5 day(s) QLIKE                     : 0.42301109
full horizon MAE                   : 2.31575953
full horizon RMSE                  : 7.72861629
full horizon R2                    : 0.00969592
full horizon Pearson r             : 0.16282191
full horizon QLIKE                 : 0.42301109

--- Task 2 ---
1 day(s) MAE                       : 0.10867727
1 day(s) RMSE                      : 0.14486375
1 day(s) R2                        : -10.94433524
1 day(s) Pearson r                 : -0.01677645
1 day(s) QLIKE                     : 13.21323228
3 day(s) MAE                       : 0.10526507
3 day(s) RMSE                      : 0.13500638
3 day(s) R2                        : -9.45863205
3 day(s) Pearson r                 : -0.01915310
3 day(s) QLIKE                     : 13.12658592
5 day(s) MAE                       : 0.10958367
5 day(s) RMSE                      : 0.14231834
5 day(s) R2                        : -10.71375158
5 day(s) Pearson r                 : -0.01869816
5 day(s) QLIKE                     : 13.08543450
full horizon MAE                   : 0.10958367
full horizon RMSE                  : 0.14231834
full horizon R2                    : -10.71375158
full horizon Pearson r             : -0.01869816
full horizon QLIKE                 : 13.08543450

--- Task 3 ---
1 day(s) MAE                       : 0.31232802
1 day(s) RMSE                      : 0.37564997
1 day(s) R2                        : -0.11467738
1 day(s) Pearson r                 : 0.19452757
1 day(s) QLIKE                     : 6.85341585
3 day(s) MAE                       : 0.31462650
3 day(s) RMSE                      : 0.37763323
3 day(s) R2                        : -0.12642251
3 day(s) Pearson r                 : 0.19146907
3 day(s) QLIKE                     : 6.84985210
5 day(s) MAE                       : 0.31629084
5 day(s) RMSE                      : 0.38005361
5 day(s) R2                        : -0.14086344
5 day(s) Pearson r                 : 0.17880762
5 day(s) QLIKE                     : 6.85300999
full horizon MAE                   : 0.31629084
full horizon RMSE                  : 0.38005361
full horizon R2                    : -0.14086344
full horizon Pearson r             : 0.17880762
full horizon QLIKE                 : 6.85300999

--- Task 4 ---
1 day(s) MAE                       : 0.76485129
1 day(s) RMSE                      : 0.93651184
1 day(s) R2                        : -0.65009735
1 day(s) Pearson r                 : 0.10229135
1 day(s) QLIKE                     : 1.00349636
3 day(s) MAE                       : 0.76521506
3 day(s) RMSE                      : 0.93768434
3 day(s) R2                        : -0.65611012
3 day(s) Pearson r                 : 0.10983806
3 day(s) QLIKE                     : 1.01056388
5 day(s) MAE                       : 0.76404773
5 day(s) RMSE                      : 0.93487645
5 day(s) R2                        : -0.64806505
5 day(s) Pearson r                 : 0.10885924
5 day(s) QLIKE                     : 1.02972328
full horizon MAE                   : 0.76404773
full horizon RMSE                  : 0.93487645
full horizon R2                    : -0.64806505
full horizon Pearson r             : 0.10885924
full horizon QLIKE                 : 1.02972328

--- Task 5 ---
1 day(s) MAE                       : 0.03860649
1 day(s) RMSE                      : 0.04944255
1 day(s) R2                        : -0.45778359
1 day(s) Pearson r                 : 0.61673544
1 day(s) QLIKE                     : 5.95688339
3 day(s) MAE                       : 0.03879512
3 day(s) RMSE                      : 0.05016314
3 day(s) R2                        : -0.50559273
3 day(s) Pearson r                 : 0.58891949
3 day(s) QLIKE                     : 6.01982210
5 day(s) MAE                       : 0.03947484
5 day(s) RMSE                      : 0.05134332
5 day(s) R2                        : -0.58126195
5 day(s) Pearson r                 : 0.57827273
5 day(s) QLIKE                     : 6.00109497
full horizon MAE                   : 0.03947484
full horizon RMSE                  : 0.05134332
full horizon R2                    : -0.58126195
full horizon Pearson r             : 0.57827273
full horizon QLIKE                 : 6.00109497

--- Task 6 ---
1 day(s) MAE                       : 1.73060874
1 day(s) RMSE                      : 3.59874331
1 day(s) R2                        : -11.17324183
1 day(s) Pearson r                 : -0.27209082
1 day(s) QLIKE                     : 0.06991260
3 day(s) MAE                       : 1.74280416
3 day(s) RMSE                      : 3.63976403
3 day(s) R2                        : -11.37294333
3 day(s) Pearson r                 : -0.26851498
3 day(s) QLIKE                     : 0.07063474
5 day(s) MAE                       : 1.75808483
5 day(s) RMSE                      : 3.65160014
5 day(s) R2                        : -11.38173709
5 day(s) Pearson r                 : -0.26522029
5 day(s) QLIKE                     : 0.07138147
full horizon MAE                   : 1.75808483
full horizon RMSE                  : 3.65160014
full horizon R2                    : -11.38173709
full horizon Pearson r             : -0.26522029
full horizon QLIKE                 : 0.07138147

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_H5.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=1.62862, max=17.6604

=== BAC | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.106232115580254
  Min value:  -11.491304399558798
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.253588840112363
  Min value:  -1.9710462778525513
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.205991919409481
  Min value:  -11.491304399558798
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 162s 481ms/step - loss: 0.8532 - val_loss: 0.5739 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 212ms/step - loss: 0.5766 - val_loss: 0.4571 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 12s 251ms/step - loss: 0.5103 - val_loss: 0.5189 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 244ms/step - loss: 0.4804 - val_loss: 0.4528 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 250ms/step - loss: 0.4514 - val_loss: 0.4842 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 246ms/step - loss: 0.4320 - val_loss: 0.4611 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 247ms/step - loss: 0.4208 - val_loss: 0.4785 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 249ms/step - loss: 0.4146 - val_loss: 0.5208 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 237ms/step - loss: 0.3834
Epoch 9: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 244ms/step - loss: 0.4092 - val_loss: 0.5503 - learning_rate: 5.0000e-04
Epoch 9: early stopping
Restoring model weights from the end of the best epoch: 4.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.16254479
1 day(s) RMSE                      : 7.61616483
1 day(s) R2                        : 0.03739228
1 day(s) Pearson r                 : 0.29405977
1 day(s) QLIKE                     : 0.32518651
3 day(s) MAE                       : 2.15970029
3 day(s) RMSE                      : 7.74345658
3 day(s) R2                        : 0.00558192
3 day(s) Pearson r                 : 0.22022808
3 day(s) QLIKE                     : 0.37056787
5 day(s) MAE                       : 2.17472647
5 day(s) RMSE                      : 7.79888227
5 day(s) R2                        : -0.00839296
5 day(s) Pearson r                 : 0.16489381
5 day(s) QLIKE                     : 0.39194049
full horizon MAE                   : 2.17472647
full horizon RMSE                  : 7.79888227
full horizon R2                    : -0.00839296
full horizon Pearson r             : 0.16489381
full horizon QLIKE                 : 0.39194049

--- Task 2 ---
1 day(s) MAE                       : 0.07004484
1 day(s) RMSE                      : 0.08089273
1 day(s) R2                        : -2.72444489
1 day(s) Pearson r                 : 0.04919071
1 day(s) QLIKE                     : 2.65586125
3 day(s) MAE                       : 0.07158585
3 day(s) RMSE                      : 0.08255797
3 day(s) R2                        : -2.91096817
3 day(s) Pearson r                 : 0.01076867
3 day(s) QLIKE                     : 7.31794794
5 day(s) MAE                       : 0.07197767
5 day(s) RMSE                      : 0.08294115
5 day(s) R2                        : -2.97845444
5 day(s) Pearson r                 : 0.00918905
5 day(s) QLIKE                     : 11.83037043
full horizon MAE                   : 0.07197767
full horizon RMSE                  : 0.08294115
full horizon R2                    : -2.97845444
full horizon Pearson r             : 0.00918905
full horizon QLIKE                 : 11.83037043

--- Task 3 ---
1 day(s) MAE                       : 0.32691785
1 day(s) RMSE                      : 0.37556135
1 day(s) R2                        : -0.11415151
1 day(s) Pearson r                 : 0.16302681
1 day(s) QLIKE                     : 6.83534931
3 day(s) MAE                       : 0.32354858
3 day(s) RMSE                      : 0.36334012
3 day(s) R2                        : -0.04276788
3 day(s) Pearson r                 : 0.02406011
3 day(s) QLIKE                     : 6.84299555
5 day(s) MAE                       : 0.32291458
5 day(s) RMSE                      : 0.35995652
5 day(s) R2                        : -0.02339674
5 day(s) Pearson r                 : 0.04753250
5 day(s) QLIKE                     : 6.84355992
full horizon MAE                   : 0.32291458
full horizon RMSE                  : 0.35995652
full horizon R2                    : -0.02339674
full horizon Pearson r             : 0.04753250
full horizon QLIKE                 : 6.84355992

--- Task 4 ---
1 day(s) MAE                       : 0.90330908
1 day(s) RMSE                      : 1.12813065
1 day(s) R2                        : -1.39442822
1 day(s) Pearson r                 : -0.04231339
1 day(s) QLIKE                     : 0.37180528
3 day(s) MAE                       : 0.83071082
3 day(s) RMSE                      : 1.03584752
3 day(s) R2                        : -1.02100577
3 day(s) Pearson r                 : -0.03073603
3 day(s) QLIKE                     : 0.43479715
5 day(s) MAE                       : 0.77029336
5 day(s) RMSE                      : 0.94577899
5 day(s) R2                        : -0.68672869
5 day(s) Pearson r                 : -0.01925831
5 day(s) QLIKE                     : 0.48275681
full horizon MAE                   : 0.77029336
full horizon RMSE                  : 0.94577899
full horizon R2                    : -0.68672869
full horizon Pearson r             : -0.01925831
full horizon QLIKE                 : 0.48275681

--- Task 5 ---
1 day(s) MAE                       : 0.05242022
1 day(s) RMSE                      : 0.06647485
1 day(s) R2                        : -1.63515516
1 day(s) Pearson r                 : -0.07844138
1 day(s) QLIKE                     : 5.91447046
3 day(s) MAE                       : 0.05264951
3 day(s) RMSE                      : 0.06664330
3 day(s) R2                        : -1.65736422
3 day(s) Pearson r                 : -0.01727581
3 day(s) QLIKE                     : 13.74495787
5 day(s) MAE                       : 0.05286777
5 day(s) RMSE                      : 0.06679022
5 day(s) R2                        : -1.67585004
5 day(s) Pearson r                 : -0.01479129
5 day(s) QLIKE                     : 11.68814819
full horizon MAE                   : 0.05286777
full horizon RMSE                  : 0.06679022
full horizon R2                    : -1.67585004
full horizon Pearson r             : -0.01479129
full horizon QLIKE                 : 11.68814819

--- Task 6 ---
1 day(s) MAE                       : 1.34912549
1 day(s) RMSE                      : 1.70931726
1 day(s) R2                        : -1.74631194
1 day(s) Pearson r                 : -0.20179339
1 day(s) QLIKE                     : 0.02592959
3 day(s) MAE                       : 1.19200360
3 day(s) RMSE                      : 1.51059534
3 day(s) R2                        : -1.13119332
3 day(s) Pearson r                 : -0.16144436
3 day(s) QLIKE                     : 0.02882274
5 day(s) MAE                       : 1.13819544
5 day(s) RMSE                      : 1.47310240
5 day(s) R2                        : -1.01502772
5 day(s) Pearson r                 : -0.13360383
5 day(s) QLIKE                     : 0.03369750
full horizon MAE                   : 1.13819544
full horizon RMSE                  : 1.47310240
full horizon R2                    : -1.01502772
full horizon Pearson r             : -0.13360383
full horizon QLIKE                 : 0.03369750

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.08499, max=5.06627

=== BAC | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103196331248472
  Min value:  -11.491352783545967
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2515405379791398
  Min value:  -1.9606954808877406
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203435974440148
  Min value:  -11.491352783545967
Epoch 001 | phase=1 | train_loss=5.0010 | val_main=0.536720
Epoch 002 | phase=1 | train_loss=4.9013 | val_main=0.536765
Epoch 003 | phase=1 | train_loss=4.9571 | val_main=0.536868
Epoch 004 | phase=1 | train_loss=4.7704 | val_main=0.536950
Epoch 005 | phase=1 | train_loss=4.4548 | val_main=0.536915
Epoch 006 | phase=1 | train_loss=4.5139 | val_main=0.537046
Epoch 007 | phase=1 | train_loss=4.3809 | val_main=0.536997
Epoch 008 | phase=1 | train_loss=4.2676 | val_main=0.537097
Epoch 009 | phase=1 | train_loss=4.2648 | val_main=0.537233
Epoch 010 | phase=1 | train_loss=4.0298 | val_main=0.536860
Epoch 011 | phase=1 | train_loss=3.9483 | val_main=0.537115
Epoch 012 | phase=1 | train_loss=4.0520 | val_main=0.537000
Epoch 013 | phase=1 | train_loss=3.8559 | val_main=0.536965
Epoch 014 | phase=1 | train_loss=3.7924 | val_main=0.536917
Epoch 015 | phase=1 | train_loss=3.6161 | val_main=0.537068
Epoch 016 | phase=0 | train_loss=4.6228 | val_main=0.529338
Epoch 017 | phase=0 | train_loss=4.3395 | val_main=0.501402
Epoch 018 | phase=0 | train_loss=4.2134 | val_main=0.505107
Epoch 019 | phase=0 | train_loss=3.9097 | val_main=0.553127
Epoch 020 | phase=0 | train_loss=3.7150 | val_main=0.513077
Epoch 021 | phase=0 | train_loss=3.6330 | val_main=0.507749
Epoch 022 | phase=0 | train_loss=3.6073 | val_main=0.509842
Epoch 023 | phase=0 | train_loss=3.6310 | val_main=0.517197
Epoch 024 | phase=0 | train_loss=3.3245 | val_main=0.518156
Epoch 025 | phase=0 | train_loss=3.3517 | val_main=0.522236
Epoch 026 | phase=0 | train_loss=3.3343 | val_main=0.508254
Epoch 027 | phase=0 | train_loss=3.3106 | val_main=0.527277
Epoch 028 | phase=0 | train_loss=3.3546 | val_main=0.509757
Epoch 029 | phase=0 | train_loss=3.1087 | val_main=0.520470
Epoch 030 | phase=0 | train_loss=3.1270 | val_main=0.513141
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.34590783
1 day(s) RMSE                      : 7.75802991
1 day(s) R2                        : 0.00119762
1 day(s) Pearson r                 : 0.15966105
1 day(s) QLIKE                     : 0.40754764
3 day(s) MAE                       : 2.35499479
3 day(s) RMSE                      : 7.78203057
3 day(s) R2                        : -0.00435013
3 day(s) Pearson r                 : 0.07288586
3 day(s) QLIKE                     : 0.42043632
5 day(s) MAE                       : 2.36866853
5 day(s) RMSE                      : 7.79731201
5 day(s) R2                        : -0.00798694
5 day(s) Pearson r                 : 0.00352125
5 day(s) QLIKE                     : 0.42884842
10 day(s) MAE                      : 2.37775464
10 day(s) RMSE                     : 7.80807994
10 day(s) R2                       : -0.01067697
10 day(s) Pearson r                : -0.04459571
10 day(s) QLIKE                    : 0.43318596
full horizon MAE                   : 2.37775464
full horizon RMSE                  : 7.80807994
full horizon R2                    : -0.01067697
full horizon Pearson r             : -0.04459571
full horizon QLIKE                 : 0.43318596

--- Task 2 ---
1 day(s) MAE                       : 0.07206634
1 day(s) RMSE                      : 0.08125629
1 day(s) R2                        : -2.75799751
1 day(s) Pearson r                 : -0.14273978
1 day(s) QLIKE                     : 11.71580640
3 day(s) MAE                       : 0.07337047
3 day(s) RMSE                      : 0.08332631
3 day(s) R2                        : -2.98410330
3 day(s) Pearson r                 : -0.13625783
3 day(s) QLIKE                     : 11.89378264
5 day(s) MAE                       : 0.08245666
5 day(s) RMSE                      : 0.09588868
5 day(s) R2                        : -4.31751808
5 day(s) Pearson r                 : -0.12128773
5 day(s) QLIKE                     : 12.12633643
10 day(s) MAE                      : 0.08453744
10 day(s) RMSE                     : 0.09770019
10 day(s) R2                       : -4.60736754
10 day(s) Pearson r                : -0.12449508
10 day(s) QLIKE                    : 12.23010252
full horizon MAE                   : 0.08453744
full horizon RMSE                  : 0.09770019
full horizon R2                    : -4.60736754
full horizon Pearson r             : -0.12449508
full horizon QLIKE                 : 12.23010252

--- Task 3 ---
1 day(s) MAE                       : 0.31034323
1 day(s) RMSE                      : 0.37152759
1 day(s) R2                        : -0.09034673
1 day(s) Pearson r                 : 0.26719008
1 day(s) QLIKE                     : 6.82437096
3 day(s) MAE                       : 0.31479138
3 day(s) RMSE                      : 0.37737315
3 day(s) R2                        : -0.12487151
3 day(s) Pearson r                 : 0.24346146
3 day(s) QLIKE                     : 6.82687393
5 day(s) MAE                       : 0.31539033
5 day(s) RMSE                      : 0.37839908
5 day(s) R2                        : -0.13095177
5 day(s) Pearson r                 : 0.23487974
5 day(s) QLIKE                     : 6.82939444
10 day(s) MAE                      : 0.31598961
10 day(s) RMSE                     : 0.38033026
10 day(s) R2                       : -0.14272052
10 day(s) Pearson r                : 0.21105051
10 day(s) QLIKE                    : 6.83703672
full horizon MAE                   : 0.31598961
full horizon RMSE                  : 0.38033026
full horizon R2                    : -0.14272052
full horizon Pearson r             : 0.21105051
full horizon QLIKE                 : 6.83703672

--- Task 4 ---
1 day(s) MAE                       : 0.75512644
1 day(s) RMSE                      : 0.94823929
1 day(s) R2                        : -0.69168273
1 day(s) Pearson r                 : 0.21882662
1 day(s) QLIKE                     : 0.57376015
3 day(s) MAE                       : 0.76538425
3 day(s) RMSE                      : 0.95755977
3 day(s) R2                        : -0.72706097
3 day(s) Pearson r                 : 0.22125383
3 day(s) QLIKE                     : 0.55829232
5 day(s) MAE                       : 0.75174942
5 day(s) RMSE                      : 0.94373346
5 day(s) R2                        : -0.67944048
5 day(s) Pearson r                 : 0.21529906
5 day(s) QLIKE                     : 0.58602024
10 day(s) MAE                      : 0.73747537
10 day(s) RMSE                     : 0.93020271
10 day(s) R2                       : -0.63613332
10 day(s) Pearson r                : 0.22373957
10 day(s) QLIKE                    : 0.61186191
full horizon MAE                   : 0.73747537
full horizon RMSE                  : 0.93020271
full horizon R2                    : -0.63613332
full horizon Pearson r             : 0.22373957
full horizon QLIKE                 : 0.61186191

--- Task 5 ---
1 day(s) MAE                       : 0.03614182
1 day(s) RMSE                      : 0.04662917
1 day(s) R2                        : -0.29660202
1 day(s) Pearson r                 : 0.52471613
1 day(s) QLIKE                     : 6.08611143
3 day(s) MAE                       : 0.03549608
3 day(s) RMSE                      : 0.04585899
3 day(s) R2                        : -0.25830837
3 day(s) Pearson r                 : 0.51913458
3 day(s) QLIKE                     : 6.04060621
5 day(s) MAE                       : 0.03652988
5 day(s) RMSE                      : 0.04620915
5 day(s) R2                        : -0.28083127
5 day(s) Pearson r                 : 0.53011738
5 day(s) QLIKE                     : 5.96966409
10 day(s) MAE                      : 0.03636241
10 day(s) RMSE                     : 0.04585370
10 day(s) R2                       : -0.27040750
10 day(s) Pearson r                : 0.53787404
10 day(s) QLIKE                    : 5.79205217
full horizon MAE                   : 0.03636241
full horizon RMSE                  : 0.04585370
full horizon R2                    : -0.27040750
full horizon Pearson r             : 0.53787404
full horizon QLIKE                 : 5.79205217

--- Task 6 ---
1 day(s) MAE                       : 1.82638128
1 day(s) RMSE                      : 2.96022589
1 day(s) R2                        : -7.23671833
1 day(s) Pearson r                 : -0.26402920
1 day(s) QLIKE                     : 0.08031078
3 day(s) MAE                       : 1.91284644
3 day(s) RMSE                      : 3.16769413
3 day(s) R2                        : -8.37158494
3 day(s) Pearson r                 : -0.27116945
3 day(s) QLIKE                     : 0.08522938
5 day(s) MAE                       : 1.93444349
5 day(s) RMSE                      : 3.21251542
5 day(s) R2                        : -8.58309053
5 day(s) Pearson r                 : -0.26871955
5 day(s) QLIKE                     : 0.08617828
10 day(s) MAE                      : 1.96810134
10 day(s) RMSE                     : 3.25252692
10 day(s) R2                       : -8.66210283
10 day(s) Pearson r                : -0.26942197
10 day(s) QLIKE                    : 0.08838694
full horizon MAE                   : 1.96810134
full horizon RMSE                  : 3.25252692
full horizon R2                    : -8.66210283
full horizon Pearson r             : -0.26942197
full horizon QLIKE                 : 0.08838694

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_H10.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.50056, max=4.26984

=== BAC | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103196331248472
  Min value:  -11.491352783545967
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2515405379791398
  Min value:  -1.9606954808877406
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203435974440148
  Min value:  -11.491352783545967
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 143s 490ms/step - loss: 0.8209 - val_loss: 0.5610 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 249ms/step - loss: 0.5139 - val_loss: 0.5171 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 333ms/step - loss: 0.4406 - val_loss: 0.5513 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 18s 387ms/step - loss: 0.4006 - val_loss: 0.8866 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 17s 372ms/step - loss: 0.3810 - val_loss: 0.8487 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 454ms/step - loss: 0.4069 - val_loss: 0.5751 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 359ms/step - loss: 0.3744
Epoch 7: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 17s 369ms/step - loss: 0.3709 - val_loss: 0.5524 - learning_rate: 5.0000e-04
Epoch 7: early stopping
Restoring model weights from the end of the best epoch: 2.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.61792440
1 day(s) RMSE                      : 7.76164787
1 day(s) R2                        : 0.00026582
1 day(s) Pearson r                 : 0.35168474
1 day(s) QLIKE                     : 0.42234177
3 day(s) MAE                       : 2.53037497
3 day(s) RMSE                      : 7.77124332
3 day(s) R2                        : -0.00156766
3 day(s) Pearson r                 : 0.00296058
3 day(s) QLIKE                     : 0.42529610
5 day(s) MAE                       : 2.40686350
5 day(s) RMSE                      : 7.80222085
5 day(s) R2                        : -0.00925650
5 day(s) Pearson r                 : -0.00053861
5 day(s) QLIKE                     : 0.43496490
10 day(s) MAE                      : 2.30011117
10 day(s) RMSE                     : 7.85221073
10 day(s) R2                       : -0.02213383
10 day(s) Pearson r                : -0.00134547
10 day(s) QLIKE                    : 0.43950601
full horizon MAE                   : 2.30011117
full horizon RMSE                  : 7.85221073
full horizon R2                    : -0.02213383
full horizon Pearson r             : -0.00134547
full horizon QLIKE                 : 0.43950601

--- Task 2 ---
1 day(s) MAE                       : 0.07030682
1 day(s) RMSE                      : 0.08119597
1 day(s) R2                        : -2.75242069
1 day(s) Pearson r                 : 0.08582186
1 day(s) QLIKE                     : 2.63841790
3 day(s) MAE                       : 0.07066192
3 day(s) RMSE                      : 0.08153817
3 day(s) R2                        : -2.81494383
3 day(s) Pearson r                 : 0.05230699
3 day(s) QLIKE                     : 2.89788945
5 day(s) MAE                       : 0.07139619
5 day(s) RMSE                      : 0.08230157
5 day(s) R2                        : -2.91733359
5 day(s) Pearson r                 : 0.04223065
5 day(s) QLIKE                     : 13.21461445
10 day(s) MAE                      : 0.07180376
10 day(s) RMSE                     : 0.08265384
10 day(s) R2                       : -3.01323160
10 day(s) Pearson r                : 0.03012506
10 day(s) QLIKE                    : 8.75563277
full horizon MAE                   : 0.07180376
full horizon RMSE                  : 0.08265384
full horizon R2                    : -3.01323160
full horizon Pearson r             : 0.03012506
full horizon QLIKE                 : 8.75563277

--- Task 3 ---
1 day(s) MAE                       : 0.32394873
1 day(s) RMSE                      : 0.36058464
1 day(s) R2                        : -0.02706259
1 day(s) Pearson r                 : 0.04522192
1 day(s) QLIKE                     : 6.83803996
3 day(s) MAE                       : 0.32331031
3 day(s) RMSE                      : 0.35806194
3 day(s) R2                        : -0.01269167
3 day(s) Pearson r                 : 0.00139543
3 day(s) QLIKE                     : 6.83956108
5 day(s) MAE                       : 0.32422904
5 day(s) RMSE                      : 0.35781382
5 day(s) R2                        : -0.01124913
5 day(s) Pearson r                 : 0.00283365
5 day(s) QLIKE                     : 6.84192918
10 day(s) MAE                      : 0.32939485
10 day(s) RMSE                     : 0.36259803
10 day(s) R2                       : -0.03864978
10 day(s) Pearson r                : 0.01782435
10 day(s) QLIKE                    : 6.84697748
full horizon MAE                   : 0.32939485
full horizon RMSE                  : 0.36259803
full horizon R2                    : -0.03864978
full horizon Pearson r             : 0.01782435
full horizon QLIKE                 : 6.84697748

--- Task 4 ---
1 day(s) MAE                       : 0.96121378
1 day(s) RMSE                      : 1.18639477
1 day(s) R2                        : -1.64814324
1 day(s) Pearson r                 : 0.03934531
1 day(s) QLIKE                     : 0.37004759
3 day(s) MAE                       : 0.86428306
3 day(s) RMSE                      : 1.07632842
3 day(s) R2                        : -1.18205405
3 day(s) Pearson r                 : 0.00315169
3 day(s) QLIKE                     : 0.45606930
5 day(s) MAE                       : 0.78101028
5 day(s) RMSE                      : 0.95750475
5 day(s) R2                        : -0.72881208
5 day(s) Pearson r                 : 0.00146129
5 day(s) QLIKE                     : 0.56441939
10 day(s) MAE                      : 0.72238635
10 day(s) RMSE                     : 0.85251688
10 day(s) R2                       : -0.37426174
10 day(s) Pearson r                : 0.00241930
10 day(s) QLIKE                    : 0.59540425
full horizon MAE                   : 0.72238635
full horizon RMSE                  : 0.85251688
full horizon R2                    : -0.37426174
full horizon Pearson r             : 0.00241930
full horizon QLIKE                 : 0.59540425

--- Task 5 ---
1 day(s) MAE                       : 0.05240711
1 day(s) RMSE                      : 0.06645369
1 day(s) R2                        : -1.63347726
1 day(s) Pearson r                 : -0.12386370
1 day(s) QLIKE                     : 5.74400210
3 day(s) MAE                       : 0.05261741
3 day(s) RMSE                      : 0.06659527
3 day(s) R2                        : -1.65353534
3 day(s) Pearson r                 : -0.01051259
3 day(s) QLIKE                     : 6.46084475
5 day(s) MAE                       : 0.05284851
5 day(s) RMSE                      : 0.06676147
5 day(s) R2                        : -1.67354681
5 day(s) Pearson r                 : -0.01018956
5 day(s) QLIKE                     : 14.16369397
10 day(s) MAE                      : 0.05336868
10 day(s) RMSE                     : 0.06709525
10 day(s) R2                       : -1.72005593
10 day(s) Pearson r                : -0.01267468
10 day(s) QLIKE                    : 11.32019127
full horizon MAE                   : 0.05336868
full horizon RMSE                  : 0.06709525
full horizon R2                    : -1.72005593
full horizon Pearson r             : -0.01267468
full horizon QLIKE                 : 11.32019127

--- Task 6 ---
1 day(s) MAE                       : 2.29643825
1 day(s) RMSE                      : 2.50475036
1 day(s) R2                        : -4.89703093
1 day(s) Pearson r                 : 0.19121087
1 day(s) QLIKE                     : 0.02058462
3 day(s) MAE                       : 2.05240935
3 day(s) RMSE                      : 2.28244379
3 day(s) R2                        : -3.86549186
3 day(s) Pearson r                 : 0.04246470
3 day(s) QLIKE                     : 0.02096699
5 day(s) MAE                       : 1.65575480
5 day(s) RMSE                      : 1.95012368
5 day(s) R2                        : -2.53133939
5 day(s) Pearson r                 : 0.00385013
5 day(s) QLIKE                     : 0.03044219
10 day(s) MAE                      : 1.39327189
10 day(s) RMSE                     : 1.70956402
10 day(s) R2                       : -1.66931970
10 day(s) Pearson r                : 0.00728075
10 day(s) QLIKE                    : 0.05017669
full horizon MAE                   : 1.39327189
full horizon RMSE                  : 1.70956402
full horizon R2                    : -1.66931970
full horizon Pearson r             : 0.00728075
full horizon QLIKE                 : 0.05017669

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.21556, max=3.8027

=== BAC | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103268362353512
  Min value:  -11.491398632817031
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2516894965633285
  Min value:  -1.94034481938301
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203545386302637
  Min value:  -11.491398632817031
Epoch 001 | phase=1 | train_loss=5.0668 | val_main=0.539934
Epoch 002 | phase=1 | train_loss=4.9315 | val_main=0.539984
Epoch 003 | phase=1 | train_loss=5.0042 | val_main=0.540100
Epoch 004 | phase=1 | train_loss=4.9010 | val_main=0.540134
Epoch 005 | phase=1 | train_loss=4.5863 | val_main=0.539918
Epoch 006 | phase=1 | train_loss=4.5680 | val_main=0.540063
Epoch 007 | phase=1 | train_loss=4.4653 | val_main=0.539838
Epoch 008 | phase=1 | train_loss=4.3096 | val_main=0.540001
Epoch 009 | phase=1 | train_loss=4.2756 | val_main=0.540173
Epoch 010 | phase=1 | train_loss=4.0859 | val_main=0.539798
Epoch 011 | phase=1 | train_loss=4.0025 | val_main=0.540084
Epoch 012 | phase=1 | train_loss=4.1234 | val_main=0.540001
Epoch 013 | phase=1 | train_loss=3.8998 | val_main=0.540044
Epoch 014 | phase=1 | train_loss=3.8417 | val_main=0.540019
Epoch 015 | phase=1 | train_loss=3.6804 | val_main=0.540006
Epoch 016 | phase=0 | train_loss=4.7309 | val_main=0.536237
Epoch 017 | phase=0 | train_loss=4.4614 | val_main=0.515261
Epoch 018 | phase=0 | train_loss=4.3773 | val_main=0.530009
Epoch 019 | phase=0 | train_loss=4.1156 | val_main=0.581082
Epoch 020 | phase=0 | train_loss=3.8890 | val_main=0.544786
Epoch 021 | phase=0 | train_loss=3.8467 | val_main=0.537501
Epoch 022 | phase=0 | train_loss=3.7569 | val_main=0.535718
Epoch 023 | phase=0 | train_loss=3.8449 | val_main=0.541523
Epoch 024 | phase=0 | train_loss=3.5522 | val_main=0.543480
Epoch 025 | phase=0 | train_loss=3.5228 | val_main=0.555115
Epoch 026 | phase=0 | train_loss=3.4756 | val_main=0.566396
Epoch 027 | phase=0 | train_loss=3.4451 | val_main=0.576659
Epoch 028 | phase=0 | train_loss=3.4579 | val_main=0.560898
Epoch 029 | phase=0 | train_loss=3.3072 | val_main=0.591512
Epoch 030 | phase=0 | train_loss=3.2482 | val_main=0.591284
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.30621634
1 day(s) RMSE                      : 7.74946106
1 day(s) R2                        : 0.00340278
1 day(s) Pearson r                 : 0.22546728
1 day(s) QLIKE                     : 0.39833677
3 day(s) MAE                       : 2.33237235
3 day(s) RMSE                      : 7.77608271
3 day(s) R2                        : -0.00281546
3 day(s) Pearson r                 : 0.13138949
3 day(s) QLIKE                     : 0.41333628
5 day(s) MAE                       : 2.34401869
5 day(s) RMSE                      : 7.79083744
5 day(s) R2                        : -0.00631365
5 day(s) Pearson r                 : 0.05498443
5 day(s) QLIKE                     : 0.42153248
10 day(s) MAE                      : 2.35034990
10 day(s) RMSE                     : 7.80704324
10 day(s) R2                       : -0.01040861
10 day(s) Pearson r                : -0.02063303
10 day(s) QLIKE                    : 0.42934899
20 day(s) MAE                      : 2.36062174
20 day(s) RMSE                     : 7.81097809
20 day(s) R2                       : -0.01176057
20 day(s) Pearson r                : -0.05425925
20 day(s) QLIKE                    : 0.43000817
full horizon MAE                   : 2.36062174
full horizon RMSE                  : 7.81097809
full horizon R2                    : -0.01176057
full horizon Pearson r             : -0.05425925
full horizon QLIKE                 : 0.43000817

--- Task 2 ---
1 day(s) MAE                       : 0.05908268
1 day(s) RMSE                      : 0.06822362
1 day(s) R2                        : -1.64918321
1 day(s) Pearson r                 : -0.01087365
1 day(s) QLIKE                     : 9.56081151
3 day(s) MAE                       : 0.05881067
3 day(s) RMSE                      : 0.06754039
3 day(s) R2                        : -1.61754055
3 day(s) Pearson r                 : -0.02167353
3 day(s) QLIKE                     : 9.58699470
5 day(s) MAE                       : 0.06324227
5 day(s) RMSE                      : 0.07342627
5 day(s) R2                        : -2.11800764
5 day(s) Pearson r                 : -0.02993110
5 day(s) QLIKE                     : 9.85102063
10 day(s) MAE                      : 0.06509059
10 day(s) RMSE                     : 0.07516596
10 day(s) R2                       : -2.31902604
10 day(s) Pearson r                : -0.04016992
10 day(s) QLIKE                    : 10.22771802
20 day(s) MAE                      : 0.06804632
20 day(s) RMSE                     : 0.08016317
20 day(s) R2                       : -2.86379608
20 day(s) Pearson r                : -0.06726053
20 day(s) QLIKE                    : 9.94153877
full horizon MAE                   : 0.06804632
full horizon RMSE                  : 0.08016317
full horizon R2                    : -2.86379608
full horizon Pearson r             : -0.06726053
full horizon QLIKE                 : 9.94153877

--- Task 3 ---
1 day(s) MAE                       : 0.31381848
1 day(s) RMSE                      : 0.37867126
1 day(s) R2                        : -0.13267982
1 day(s) Pearson r                 : 0.21149946
1 day(s) QLIKE                     : 6.82831597
3 day(s) MAE                       : 0.31567088
3 day(s) RMSE                      : 0.38158962
3 day(s) R2                        : -0.15014879
3 day(s) Pearson r                 : 0.19909437
3 day(s) QLIKE                     : 6.83039079
5 day(s) MAE                       : 0.31549558
5 day(s) RMSE                      : 0.38186279
5 day(s) R2                        : -0.15175110
5 day(s) Pearson r                 : 0.19215820
5 day(s) QLIKE                     : 6.83363268
10 day(s) MAE                      : 0.31717277
10 day(s) RMSE                     : 0.38574733
10 day(s) R2                       : -0.17550400
10 day(s) Pearson r                : 0.16492785
10 day(s) QLIKE                    : 6.84227266
20 day(s) MAE                      : 0.31732398
20 day(s) RMSE                     : 0.38717770
20 day(s) R2                       : -0.18363342
20 day(s) Pearson r                : 0.14738760
20 day(s) QLIKE                    : 6.84872049
full horizon MAE                   : 0.31732398
full horizon RMSE                  : 0.38717770
full horizon R2                    : -0.18363342
full horizon Pearson r             : 0.14738760
full horizon QLIKE                 : 6.84872049

--- Task 4 ---
1 day(s) MAE                       : 0.81891207
1 day(s) RMSE                      : 1.01523587
1 day(s) R2                        : -0.93917468
1 day(s) Pearson r                 : 0.15072295
1 day(s) QLIKE                     : 0.55906894
3 day(s) MAE                       : 0.80940210
3 day(s) RMSE                      : 1.00392833
3 day(s) R2                        : -0.89837194
3 day(s) Pearson r                 : 0.15225935
3 day(s) QLIKE                     : 0.58705643
5 day(s) MAE                       : 0.79780174
5 day(s) RMSE                      : 0.99032482
5 day(s) R2                        : -0.84935906
5 day(s) Pearson r                 : 0.15813838
5 day(s) QLIKE                     : 0.59762807
10 day(s) MAE                      : 0.77649047
10 day(s) RMSE                     : 0.96775698
10 day(s) R2                       : -0.77090846
10 day(s) Pearson r                : 0.17306089
10 day(s) QLIKE                    : 0.63099985
20 day(s) MAE                      : 0.75676972
20 day(s) RMSE                     : 0.94925502
20 day(s) R2                       : -0.71262000
20 day(s) Pearson r                : 0.21438220
20 day(s) QLIKE                    : 0.63175612
full horizon MAE                   : 0.75676972
full horizon RMSE                  : 0.94925502
full horizon R2                    : -0.71262000
full horizon Pearson r             : 0.21438220
full horizon QLIKE                 : 0.63175612

--- Task 5 ---
1 day(s) MAE                       : 0.03393362
1 day(s) RMSE                      : 0.04283346
1 day(s) R2                        : -0.09410181
1 day(s) Pearson r                 : 0.58157407
1 day(s) QLIKE                     : 6.03120779
3 day(s) MAE                       : 0.03662793
3 day(s) RMSE                      : 0.04572905
3 day(s) R2                        : -0.25118769
3 day(s) Pearson r                 : 0.56401463
3 day(s) QLIKE                     : 6.01110399
5 day(s) MAE                       : 0.03753888
5 day(s) RMSE                      : 0.04642153
5 day(s) R2                        : -0.29263191
5 day(s) Pearson r                 : 0.55974853
5 day(s) QLIKE                     : 5.95176416
10 day(s) MAE                      : 0.03742993
10 day(s) RMSE                     : 0.04609014
10 day(s) R2                       : -0.28354284
10 day(s) Pearson r                : 0.56276750
10 day(s) QLIKE                    : 5.79677805
20 day(s) MAE                      : 0.03856730
20 day(s) RMSE                     : 0.04724975
20 day(s) R2                       : -0.37467305
20 day(s) Pearson r                : 0.56830236
20 day(s) QLIKE                    : 5.47496037
full horizon MAE                   : 0.03856730
full horizon RMSE                  : 0.04724975
full horizon R2                    : -0.37467305
full horizon Pearson r             : 0.56830236
full horizon QLIKE                 : 5.47496037

--- Task 6 ---
1 day(s) MAE                       : 1.64973608
1 day(s) RMSE                      : 2.42131750
1 day(s) R2                        : -4.51071552
1 day(s) Pearson r                 : -0.30045540
1 day(s) QLIKE                     : 0.06921057
3 day(s) MAE                       : 1.70839140
3 day(s) RMSE                      : 2.51032310
3 day(s) R2                        : -4.88553317
3 day(s) Pearson r                 : -0.31025651
3 day(s) QLIKE                     : 0.07290938
5 day(s) MAE                       : 1.73109746
5 day(s) RMSE                      : 2.55060369
5 day(s) R2                        : -5.04089264
5 day(s) Pearson r                 : -0.31678469
5 day(s) QLIKE                     : 0.07420467
10 day(s) MAE                      : 1.76259562
10 day(s) RMSE                     : 2.59192191
10 day(s) R2                       : -5.13583512
10 day(s) Pearson r                : -0.32471327
10 day(s) QLIKE                    : 0.07606841
20 day(s) MAE                      : 1.79594364
20 day(s) RMSE                     : 2.64762141
20 day(s) R2                       : -5.20184345
20 day(s) Pearson r                : -0.32415207
20 day(s) QLIKE                    : 0.07897685
full horizon MAE                   : 1.79594364
full horizon RMSE                  : 2.64762141
full horizon R2                    : -5.20184345
full horizon Pearson r             : -0.32415207
full horizon QLIKE                 : 0.07897685

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_H20.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.43049, max=4.47631

=== BAC | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.142355974179488
  Min value:  -0.42151023550938554
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.103268362353512
  Min value:  -11.491398632817031
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.800014951166767
  Min value:  -0.36472825885137616
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.2516894965633285
  Min value:  -1.94034481938301
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.111754968787256
  Min value:  -0.38915813676977
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.203545386302637
  Min value:  -11.491398632817031
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 147s 570ms/step - loss: 0.8130 - val_loss: 0.4972 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 325ms/step - loss: 0.4822 - val_loss: 0.9885 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 456ms/step - loss: 0.3974 - val_loss: 1.6913 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 456ms/step - loss: 0.3598 - val_loss: 0.5860 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 462ms/step - loss: 0.3828 - val_loss: 0.5746 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 552ms/step - loss: 0.3375
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 26s 566ms/step - loss: 0.3393 - val_loss: 0.5025 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.45825875
1 day(s) RMSE                      : 7.77151281
1 day(s) R2                        : -0.00227709
1 day(s) Pearson r                 : 0.49840841
1 day(s) QLIKE                     : 0.42290087
3 day(s) MAE                       : 2.34487538
3 day(s) RMSE                      : 7.79718136
3 day(s) R2                        : -0.00826467
3 day(s) Pearson r                 : 0.00844745
3 day(s) QLIKE                     : 0.42613528
5 day(s) MAE                       : 2.29761018
5 day(s) RMSE                      : 7.81581143
5 day(s) R2                        : -0.01277558
5 day(s) Pearson r                 : 0.00371511
5 day(s) QLIKE                     : 0.42757047
10 day(s) MAE                      : 2.28878469
10 day(s) RMSE                     : 7.82057971
10 day(s) R2                       : -0.01391550
10 day(s) Pearson r                : 0.00061047
10 day(s) QLIKE                    : 0.42549425
20 day(s) MAE                      : 2.30827473
20 day(s) RMSE                     : 7.81276179
20 day(s) R2                       : -0.01222271
20 day(s) Pearson r                : 0.00090955
20 day(s) QLIKE                    : 0.42280907
full horizon MAE                   : 2.30827473
full horizon RMSE                  : 7.81276179
full horizon R2                    : -0.01222271
full horizon Pearson r             : 0.00090955
full horizon QLIKE                 : 0.42280907

--- Task 2 ---
1 day(s) MAE                       : 0.07210984
1 day(s) RMSE                      : 0.08318265
1 day(s) R2                        : -2.93829295
1 day(s) Pearson r                 : -0.08682322
1 day(s) QLIKE                     : 2.64269584
3 day(s) MAE                       : 0.07255141
3 day(s) RMSE                      : 0.08362155
3 day(s) R2                        : -3.01238560
3 day(s) Pearson r                 : -0.00053045
3 day(s) QLIKE                     : 7.19684871
5 day(s) MAE                       : 0.07255700
5 day(s) RMSE                      : 0.08357799
5 day(s) R2                        : -3.03978425
5 day(s) Pearson r                 : 0.00148602
5 day(s) QLIKE                     : 8.14458341
10 day(s) MAE                      : 0.07238416
10 day(s) RMSE                     : 0.08329179
10 day(s) R2                       : -3.07542224
10 day(s) Pearson r                : 0.00351051
10 day(s) QLIKE                    : 6.81972014
20 day(s) MAE                      : 0.07199732
20 day(s) RMSE                     : 0.08273224
20 day(s) R2                       : -3.11541830
20 day(s) Pearson r                : 0.00519323
20 day(s) QLIKE                    : 5.10378877
full horizon MAE                   : 0.07199732
full horizon RMSE                  : 0.08273224
full horizon R2                    : -3.11541830
full horizon Pearson r             : 0.00519323
full horizon QLIKE                 : 5.10378877

--- Task 3 ---
1 day(s) MAE                       : 0.32307302
1 day(s) RMSE                      : 0.35731368
1 day(s) R2                        : -0.00851361
1 day(s) Pearson r                 : -0.06487902
1 day(s) QLIKE                     : 6.83839007
3 day(s) MAE                       : 0.32574428
3 day(s) RMSE                      : 0.36503071
3 day(s) R2                        : -0.05249424
3 day(s) Pearson r                 : -0.00664508
3 day(s) QLIKE                     : 6.84090282
5 day(s) MAE                       : 0.32569589
5 day(s) RMSE                      : 0.36541725
5 day(s) R2                        : -0.05468321
5 day(s) Pearson r                 : -0.00087579
5 day(s) QLIKE                     : 6.84116715
10 day(s) MAE                      : 0.32479314
10 day(s) RMSE                     : 0.36492222
10 day(s) R2                       : -0.05200761
10 day(s) Pearson r                : 0.02194557
10 day(s) QLIKE                    : 6.84101756
20 day(s) MAE                      : 0.32542892
20 day(s) RMSE                     : 0.36523767
20 day(s) R2                       : -0.05328929
20 day(s) Pearson r                : 0.01182756
20 day(s) QLIKE                    : 6.84194674
full horizon MAE                   : 0.32542892
full horizon RMSE                  : 0.36523767
full horizon R2                    : -0.05328929
full horizon Pearson r             : 0.01182756
full horizon QLIKE                 : 6.84194674

--- Task 4 ---
1 day(s) MAE                       : 0.96436980
1 day(s) RMSE                      : 1.18956516
1 day(s) R2                        : -1.66231532
1 day(s) Pearson r                 : -0.01803279
1 day(s) QLIKE                     : 0.37018160
3 day(s) MAE                       : 1.01819352
3 day(s) RMSE                      : 1.24189180
3 day(s) R2                        : -1.90498175
3 day(s) Pearson r                 : 0.00012404
3 day(s) QLIKE                     : 0.42091940
5 day(s) MAE                       : 1.05046637
5 day(s) RMSE                      : 1.27238385
5 day(s) R2                        : -2.05282709
5 day(s) Pearson r                 : 0.00015178
5 day(s) QLIKE                     : 0.50423131
10 day(s) MAE                      : 1.08994967
10 day(s) RMSE                     : 1.30799188
10 day(s) R2                       : -2.23499478
10 day(s) Pearson r                : -0.00154953
10 day(s) QLIKE                    : 0.67390053
20 day(s) MAE                      : 1.11711669
20 day(s) RMSE                     : 1.33109570
20 day(s) R2                       : -2.36754780
20 day(s) Pearson r                : -0.00301744
20 day(s) QLIKE                    : 0.66052151
full horizon MAE                   : 1.11711669
full horizon RMSE                  : 1.33109570
full horizon R2                    : -2.36754780
full horizon Pearson r             : -0.00301744
full horizon QLIKE                 : 0.66052151

--- Task 5 ---
1 day(s) MAE                       : 0.05242829
1 day(s) RMSE                      : 0.06648523
1 day(s) R2                        : -1.63597794
1 day(s) Pearson r                 : 0.08516192
1 day(s) QLIKE                     : 5.74001161
3 day(s) MAE                       : 0.05262091
3 day(s) RMSE                      : 0.06660075
3 day(s) R2                        : -1.65397220
3 day(s) Pearson r                 : 0.02330990
3 day(s) QLIKE                     : 5.69158093
5 day(s) MAE                       : 0.05284613
5 day(s) RMSE                      : 0.06675824
5 day(s) R2                        : -1.67328789
5 day(s) Pearson r                 : 0.00032298
5 day(s) QLIKE                     : 6.22868637
10 day(s) MAE                      : 0.05336735
10 day(s) RMSE                     : 0.06709344
10 day(s) R2                       : -1.71990928
10 day(s) Pearson r                : -0.00927312
10 day(s) QLIKE                    : 7.74703897
20 day(s) MAE                      : 0.05426103
20 day(s) RMSE                     : 0.06758356
20 day(s) R2                       : -1.81243538
20 day(s) Pearson r                : -0.01633503
20 day(s) QLIKE                    : 7.06541674
full horizon MAE                   : 0.05426103
full horizon RMSE                  : 0.06758356
full horizon R2                    : -1.81243538
full horizon Pearson r             : -0.01633503
full horizon QLIKE                 : 7.06541674

--- Task 6 ---
1 day(s) MAE                       : 2.25717880
1 day(s) RMSE                      : 2.46874799
1 day(s) R2                        : -4.72872572
1 day(s) Pearson r                 : 0.01429237
1 day(s) QLIKE                     : 0.02064932
3 day(s) MAE                       : 2.01612684
3 day(s) RMSE                      : 2.25915029
3 day(s) R2                        : -3.76668898
3 day(s) Pearson r                 : -0.05516634
3 day(s) QLIKE                     : 0.02158987
5 day(s) MAE                       : 2.39744499
5 day(s) RMSE                      : 2.69881179
5 day(s) R2                        : -5.76332638
5 day(s) Pearson r                 : -0.03267971
5 day(s) QLIKE                     : 0.02534568
10 day(s) MAE                      : 3.79945258
10 day(s) RMSE                     : 4.23268176
10 day(s) R2                       : -15.36293058
10 day(s) Pearson r                : -0.03267159
10 day(s) QLIKE                    : 0.03813104
20 day(s) MAE                      : 6.20525741
20 day(s) RMSE                     : 6.85946247
20 day(s) R2                       : -40.62835022
20 day(s) Pearson r                : -0.04584378
20 day(s) QLIKE                    : 0.05965078
full horizon MAE                   : 6.20525741
full horizon RMSE                  : 6.85946247
full horizon R2                    : -40.62835022
full horizon Pearson r             : -0.04584378
full horizon QLIKE                 : 0.05965078

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BAC/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.587715, max=111.472
Saved y_pred min=2.52635, max=3.26592

=== C | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.256737301866204
  Min value:  -7.108340099155793
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6474166417153167
  Min value:  -1.6879442751977214
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.113246907180985
  Min value:  -7.108340099155793
Epoch 001 | phase=1 | train_loss=5.0056 | val_main=0.488385
Epoch 002 | phase=1 | train_loss=4.8877 | val_main=0.488343
Epoch 003 | phase=1 | train_loss=4.8845 | val_main=0.488344
Epoch 004 | phase=1 | train_loss=4.5950 | val_main=0.488368
Epoch 005 | phase=1 | train_loss=4.5100 | val_main=0.488407
Epoch 006 | phase=1 | train_loss=4.6296 | val_main=0.488445
Epoch 007 | phase=1 | train_loss=4.3200 | val_main=0.488430
Epoch 008 | phase=1 | train_loss=4.2823 | val_main=0.488468
Epoch 009 | phase=1 | train_loss=4.3086 | val_main=0.488463
Epoch 010 | phase=1 | train_loss=4.1987 | val_main=0.488463
Epoch 011 | phase=1 | train_loss=4.1276 | val_main=0.488523
Epoch 012 | phase=1 | train_loss=4.1219 | val_main=0.488492
Epoch 013 | phase=1 | train_loss=3.8886 | val_main=0.488451
Epoch 014 | phase=1 | train_loss=3.8819 | val_main=0.488340
Epoch 015 | phase=1 | train_loss=3.9105 | val_main=0.488411
Epoch 016 | phase=0 | train_loss=4.8173 | val_main=0.472623
Epoch 017 | phase=0 | train_loss=4.5359 | val_main=0.439199
Epoch 018 | phase=0 | train_loss=4.3766 | val_main=0.422840
Epoch 019 | phase=0 | train_loss=4.3872 | val_main=0.443229
Epoch 020 | phase=0 | train_loss=4.3786 | val_main=0.427480
Epoch 021 | phase=0 | train_loss=4.2194 | val_main=0.419022
Epoch 022 | phase=0 | train_loss=4.1837 | val_main=0.419580
Epoch 023 | phase=0 | train_loss=4.1226 | val_main=0.414117
Epoch 024 | phase=0 | train_loss=3.9117 | val_main=0.402422
Epoch 025 | phase=0 | train_loss=3.8785 | val_main=0.411370
Epoch 026 | phase=0 | train_loss=4.0096 | val_main=0.396730
Epoch 027 | phase=0 | train_loss=3.7312 | val_main=0.393099
Epoch 028 | phase=0 | train_loss=3.7006 | val_main=0.392369
Epoch 029 | phase=0 | train_loss=3.4928 | val_main=0.384685
Epoch 030 | phase=0 | train_loss=3.3388 | val_main=0.387590
Epoch 031 | phase=2 | train_loss=0.3218 | val_main=0.382467
Epoch 032 | phase=2 | train_loss=0.3218 | val_main=0.378817
Epoch 033 | phase=2 | train_loss=0.3202 | val_main=0.383628
Epoch 034 | phase=2 | train_loss=0.3078 | val_main=0.387886
Epoch 035 | phase=2 | train_loss=0.3053 | val_main=0.385183
Epoch 036 | phase=2 | train_loss=0.3109 | val_main=0.388244
Epoch 037 | phase=2 | train_loss=0.2942 | val_main=0.374739
Epoch 038 | phase=2 | train_loss=0.2915 | val_main=0.380514
Epoch 039 | phase=2 | train_loss=0.2985 | val_main=0.397301
Epoch 040 | phase=2 | train_loss=0.2885 | val_main=0.380675
Epoch 041 | phase=2 | train_loss=0.2899 | val_main=0.380754
Epoch 042 | phase=2 | train_loss=0.2884 | val_main=0.381789
Epoch 043 | phase=2 | train_loss=0.2735 | val_main=0.396656
Epoch 044 | phase=2 | train_loss=0.2683 | val_main=0.407307
Epoch 045 | phase=2 | train_loss=0.2709 | val_main=0.390572
Epoch 046 | phase=2 | train_loss=0.2616 | val_main=0.407675
Epoch 047 | phase=2 | train_loss=0.2425 | val_main=0.402662
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.97808843
1 day(s) RMSE                      : 8.97214962
1 day(s) R2                        : 0.09649050
1 day(s) Pearson r                 : 0.48336487
1 day(s) QLIKE                     : 0.27438794
full horizon MAE                   : 2.97808843
full horizon RMSE                  : 8.97214962
full horizon R2                    : 0.09649050
full horizon Pearson r             : 0.48336487
full horizon QLIKE                 : 0.27438794

--- Task 2 ---
1 day(s) MAE                       : 0.05938099
1 day(s) RMSE                      : 0.08202204
1 day(s) R2                        : -4.43107542
1 day(s) Pearson r                 : -0.13146192
1 day(s) QLIKE                     : 18.09899247
full horizon MAE                   : 0.05938099
full horizon RMSE                  : 0.08202204
full horizon R2                    : -4.43107542
full horizon Pearson r             : -0.13146192
full horizon QLIKE                 : 18.09899247

--- Task 3 ---
1 day(s) MAE                       : 0.33854043
1 day(s) RMSE                      : 0.45211198
1 day(s) R2                        : -0.30033083
1 day(s) Pearson r                 : 0.10694129
1 day(s) QLIKE                     : 8.10399153
full horizon MAE                   : 0.33854043
full horizon RMSE                  : 0.45211198
full horizon R2                    : -0.30033083
full horizon Pearson r             : 0.10694129
full horizon QLIKE                 : 8.10399153

--- Task 4 ---
1 day(s) MAE                       : 0.86596509
1 day(s) RMSE                      : 1.14621760
1 day(s) R2                        : -0.65183380
1 day(s) Pearson r                 : 0.16132257
1 day(s) QLIKE                     : 1.88000620
full horizon MAE                   : 0.86596509
full horizon RMSE                  : 1.14621760
full horizon R2                    : -0.65183380
full horizon Pearson r             : 0.16132257
full horizon QLIKE                 : 1.88000620

--- Task 5 ---
1 day(s) MAE                       : 0.08753991
1 day(s) RMSE                      : 0.10423969
1 day(s) R2                        : -1.95152429
1 day(s) Pearson r                 : -0.15915185
1 day(s) QLIKE                     : 17.71438864
full horizon MAE                   : 0.08753991
full horizon RMSE                  : 0.10423969
full horizon R2                    : -1.95152429
full horizon Pearson r             : -0.15915185
full horizon QLIKE                 : 17.71438864

--- Task 6 ---
1 day(s) MAE                       : 7.05709593
1 day(s) RMSE                      : 21.29746208
1 day(s) R2                        : -56.32277766
1 day(s) Pearson r                 : -0.00291057
1 day(s) QLIKE                     : 0.17337443
full horizon MAE                   : 7.05709593
full horizon RMSE                  : 21.29746208
full horizon R2                    : -56.32277766
full horizon Pearson r             : -0.00291057
full horizon QLIKE                 : 0.17337443

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_H1.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.10925, max=86.6316

=== C | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.256737301866204
  Min value:  -7.108340099155793
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6474166417153167
  Min value:  -1.6879442751977214
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.113246907180985
  Min value:  -7.108340099155793
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 161s 514ms/step - loss: 1.0165 - val_loss: 0.6904 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 191ms/step - loss: 0.9739 - val_loss: 0.4960 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 217ms/step - loss: 0.9275 - val_loss: 0.5271 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 203ms/step - loss: 0.9216 - val_loss: 0.4732 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 186ms/step - loss: 0.8982 - val_loss: 0.4854 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 189ms/step - loss: 0.8941 - val_loss: 0.4349 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 192ms/step - loss: 0.8881 - val_loss: 0.4334 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 190ms/step - loss: 0.8785 - val_loss: 0.4261 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 191ms/step - loss: 0.8745 - val_loss: 0.4122 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 187ms/step - loss: 0.8682 - val_loss: 0.4108 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 188ms/step - loss: 0.8666 - val_loss: 0.4142 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 188ms/step - loss: 0.8614 - val_loss: 0.4215 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 187ms/step - loss: 0.8917 - val_loss: 0.4484 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 199ms/step - loss: 0.8729 - val_loss: 0.4046 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 204ms/step - loss: 0.8582 - val_loss: 0.4250 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=16 tf_ratio=0.211 -> TF=ON
Epoch 16/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 203ms/step - loss: 0.8570 - val_loss: 0.4041 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=17 tf_ratio=0.158 -> TF=ON
Epoch 17/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 200ms/step - loss: 0.8634 - val_loss: 0.5353 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=18 tf_ratio=0.105 -> TF=ON
Epoch 18/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 208ms/step - loss: 0.9469 - val_loss: 0.5095 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=19 tf_ratio=0.053 -> TF=ON
Epoch 19/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 9s 199ms/step - loss: 0.9154 - val_loss: 0.4584 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=20 tf_ratio=0.000 -> TF=OFF
Epoch 20/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 211ms/step - loss: 0.8899 - val_loss: 0.4230 - learning_rate: 5.0000e-04
Restoring model weights from the end of the best epoch: 16.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.71354516
1 day(s) RMSE                      : 8.55226051
1 day(s) R2                        : 0.17907865
1 day(s) Pearson r                 : 0.47996262
1 day(s) QLIKE                     : 0.31168247
full horizon MAE                   : 2.71354516
full horizon RMSE                  : 8.55226051
full horizon R2                    : 0.17907865
full horizon Pearson r             : 0.47996262
full horizon QLIKE                 : 0.31168247

--- Task 2 ---
1 day(s) MAE                       : 0.04754049
1 day(s) RMSE                      : 0.05922844
1 day(s) R2                        : -1.83194742
1 day(s) Pearson r                 : 0.12100419
1 day(s) QLIKE                     : 6.53467860
full horizon MAE                   : 0.04754049
full horizon RMSE                  : 0.05922844
full horizon R2                    : -1.83194742
full horizon Pearson r             : 0.12100419
full horizon QLIKE                 : 6.53467860

--- Task 3 ---
1 day(s) MAE                       : 0.40761475
1 day(s) RMSE                      : 0.45700569
1 day(s) R2                        : -0.32863302
1 day(s) Pearson r                 : -0.07555107
1 day(s) QLIKE                     : 8.25434363
full horizon MAE                   : 0.40761475
full horizon RMSE                  : 0.45700569
full horizon R2                    : -0.32863302
full horizon Pearson r             : -0.07555107
full horizon QLIKE                 : 8.25434363

--- Task 4 ---
1 day(s) MAE                       : 0.90181846
1 day(s) RMSE                      : 1.24244526
1 day(s) R2                        : -0.94082656
1 day(s) Pearson r                 : 0.19495360
1 day(s) QLIKE                     : 0.74399784
full horizon MAE                   : 0.90181846
full horizon RMSE                  : 1.24244526
full horizon R2                    : -0.94082656
full horizon Pearson r             : 0.19495360
full horizon QLIKE                 : 0.74399784

--- Task 5 ---
1 day(s) MAE                       : 0.09356806
1 day(s) RMSE                      : 0.11114221
1 day(s) R2                        : -2.35535254
1 day(s) Pearson r                 : -0.28737256
1 day(s) QLIKE                     : 7.94342332
full horizon MAE                   : 0.09356806
full horizon RMSE                  : 0.11114221
full horizon R2                    : -2.35535254
full horizon Pearson r             : -0.28737256
full horizon QLIKE                 : 7.94342332

--- Task 6 ---
1 day(s) MAE                       : 2.58831605
1 day(s) RMSE                      : 3.37535219
1 day(s) R2                        : -0.43982499
1 day(s) Pearson r                 : -0.10458086
1 day(s) QLIKE                     : 0.09655576
full horizon MAE                   : 2.58831605
full horizon RMSE                  : 3.37535219
full horizon R2                    : -0.43982499
full horizon Pearson r             : -0.10458086
full horizon QLIKE                 : 0.09655576

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.55051, max=24.4493

=== C | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.257460407279653
  Min value:  -7.1082338908403395
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.645753754809614
  Min value:  -1.6819253820445499
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.110793676955011
  Min value:  -7.1082338908403395
Epoch 001 | phase=1 | train_loss=5.1348 | val_main=0.481562
Epoch 002 | phase=1 | train_loss=4.8754 | val_main=0.481572
Epoch 003 | phase=1 | train_loss=4.9494 | val_main=0.481574
Epoch 004 | phase=1 | train_loss=4.6809 | val_main=0.481574
Epoch 005 | phase=1 | train_loss=4.4876 | val_main=0.481596
Epoch 006 | phase=1 | train_loss=4.4977 | val_main=0.481579
Epoch 007 | phase=1 | train_loss=4.3906 | val_main=0.481586
Epoch 008 | phase=1 | train_loss=4.3351 | val_main=0.481553
Epoch 009 | phase=1 | train_loss=4.2749 | val_main=0.481590
Epoch 010 | phase=1 | train_loss=4.1970 | val_main=0.481591
Epoch 011 | phase=1 | train_loss=4.1502 | val_main=0.481571
Epoch 012 | phase=1 | train_loss=4.0699 | val_main=0.481575
Epoch 013 | phase=1 | train_loss=3.8895 | val_main=0.481574
Epoch 014 | phase=1 | train_loss=3.8299 | val_main=0.481596
Epoch 015 | phase=1 | train_loss=3.7789 | val_main=0.481558
Epoch 016 | phase=0 | train_loss=4.7464 | val_main=0.471777
Epoch 017 | phase=0 | train_loss=4.4867 | val_main=0.448215
Epoch 018 | phase=0 | train_loss=4.3209 | val_main=0.429790
Epoch 019 | phase=0 | train_loss=4.1133 | val_main=0.467571
Epoch 020 | phase=0 | train_loss=3.9313 | val_main=0.448557
Epoch 021 | phase=0 | train_loss=3.7340 | val_main=0.444195
Epoch 022 | phase=0 | train_loss=3.7076 | val_main=0.474255
Epoch 023 | phase=0 | train_loss=3.8438 | val_main=0.462772
Epoch 024 | phase=0 | train_loss=3.4673 | val_main=0.459105
Epoch 025 | phase=0 | train_loss=3.3287 | val_main=0.446839
Epoch 026 | phase=0 | train_loss=3.2801 | val_main=0.449734
Epoch 027 | phase=0 | train_loss=3.0986 | val_main=0.450819
Epoch 028 | phase=0 | train_loss=3.0742 | val_main=0.441678
Epoch 029 | phase=0 | train_loss=2.9843 | val_main=0.447876
Epoch 030 | phase=0 | train_loss=2.8416 | val_main=0.451159
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.81258959
1 day(s) RMSE                      : 9.35761595
1 day(s) R2                        : 0.01718870
1 day(s) Pearson r                 : 0.27578490
1 day(s) QLIKE                     : 0.36929527
3 day(s) MAE                       : 2.87125284
3 day(s) RMSE                      : 9.45231513
3 day(s) R2                        : 0.00208583
3 day(s) Pearson r                 : 0.19698482
3 day(s) QLIKE                     : 0.39813070
5 day(s) MAE                       : 2.90324622
5 day(s) RMSE                      : 9.52228146
5 day(s) R2                        : -0.00951808
5 day(s) Pearson r                 : 0.14233115
5 day(s) QLIKE                     : 0.41698181
full horizon MAE                   : 2.90324622
full horizon RMSE                  : 9.52228146
full horizon R2                    : -0.00951808
full horizon Pearson r             : 0.14233115
full horizon QLIKE                 : 0.41698181

--- Task 2 ---
1 day(s) MAE                       : 0.04206224
1 day(s) RMSE                      : 0.05136770
1 day(s) R2                        : -1.13012434
1 day(s) Pearson r                 : 0.15102364
1 day(s) QLIKE                     : 11.41834592
3 day(s) MAE                       : 0.04244987
3 day(s) RMSE                      : 0.05219884
3 day(s) R2                        : -1.19749764
3 day(s) Pearson r                 : 0.14261654
3 day(s) QLIKE                     : 11.41596392
5 day(s) MAE                       : 0.04275618
5 day(s) RMSE                      : 0.05290557
5 day(s) R2                        : -1.25596257
5 day(s) Pearson r                 : 0.13970412
5 day(s) QLIKE                     : 11.54016127
full horizon MAE                   : 0.04275618
full horizon RMSE                  : 0.05290557
full horizon R2                    : -1.25596257
full horizon Pearson r             : 0.13970412
full horizon QLIKE                 : 11.54016127

--- Task 3 ---
1 day(s) MAE                       : 0.36425044
1 day(s) RMSE                      : 0.47223992
1 day(s) R2                        : -0.41868905
1 day(s) Pearson r                 : -0.03455183
1 day(s) QLIKE                     : 8.11384863
3 day(s) MAE                       : 0.36026840
3 day(s) RMSE                      : 0.46306724
3 day(s) R2                        : -0.36548531
3 day(s) Pearson r                 : 0.05272144
3 day(s) QLIKE                     : 8.10812927
5 day(s) MAE                       : 0.36028695
5 day(s) RMSE                      : 0.46244175
5 day(s) R2                        : -0.36290629
5 day(s) Pearson r                 : 0.05727163
5 day(s) QLIKE                     : 8.10779243
full horizon MAE                   : 0.36028695
full horizon RMSE                  : 0.46244175
full horizon R2                    : -0.36290629
full horizon Pearson r             : 0.05727163
full horizon QLIKE                 : 8.10779243

--- Task 4 ---
1 day(s) MAE                       : 0.89649474
1 day(s) RMSE                      : 1.17508840
1 day(s) R2                        : -0.73609417
1 day(s) Pearson r                 : -0.00219743
1 day(s) QLIKE                     : 1.53954155
3 day(s) MAE                       : 0.89545139
3 day(s) RMSE                      : 1.17317134
3 day(s) R2                        : -0.73390448
3 day(s) Pearson r                 : 0.00934464
3 day(s) QLIKE                     : 1.48551277
5 day(s) MAE                       : 0.89650666
5 day(s) RMSE                      : 1.17484503
5 day(s) R2                        : -0.74209832
5 day(s) Pearson r                 : 0.01075368
5 day(s) QLIKE                     : 1.49397613
full horizon MAE                   : 0.89650666
full horizon RMSE                  : 1.17484503
full horizon R2                    : -0.74209832
full horizon Pearson r             : 0.01075368
full horizon QLIKE                 : 1.49397613

--- Task 5 ---
1 day(s) MAE                       : 0.09159247
1 day(s) RMSE                      : 0.10872541
1 day(s) R2                        : -2.21101416
1 day(s) Pearson r                 : -0.18877933
1 day(s) QLIKE                     : 17.29055009
3 day(s) MAE                       : 0.09194021
3 day(s) RMSE                      : 0.10897440
3 day(s) R2                        : -2.24261361
3 day(s) Pearson r                 : -0.14620361
3 day(s) QLIKE                     : 17.21749604
5 day(s) MAE                       : 0.09244945
5 day(s) RMSE                      : 0.10937557
5 day(s) R2                        : -2.28274109
5 day(s) Pearson r                 : -0.14966856
5 day(s) QLIKE                     : 17.12659951
full horizon MAE                   : 0.09244945
full horizon RMSE                  : 0.10937557
full horizon R2                    : -2.28274109
full horizon Pearson r             : -0.14966856
full horizon QLIKE                 : 17.12659951

--- Task 6 ---
1 day(s) MAE                       : 6.05820615
1 day(s) RMSE                      : 12.67037239
1 day(s) R2                        : -19.28851580
1 day(s) Pearson r                 : -0.01374934
1 day(s) QLIKE                     : 0.17782813
3 day(s) MAE                       : 6.01609116
3 day(s) RMSE                      : 12.67439291
3 day(s) R2                        : -19.23688495
3 day(s) Pearson r                 : -0.01619856
3 day(s) QLIKE                     : 0.17729891
5 day(s) MAE                       : 5.99097351
5 day(s) RMSE                      : 12.54791814
5 day(s) R2                        : -18.77164835
5 day(s) Pearson r                 : -0.01816486
5 day(s) QLIKE                     : 0.17645853
full horizon MAE                   : 5.99097351
full horizon RMSE                  : 12.54791814
full horizon R2                    : -18.77164835
full horizon Pearson r             : -0.01816486
full horizon QLIKE                 : 0.17645853

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_H5.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=2.41624, max=8.33239

=== C | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.257460407279653
  Min value:  -7.1082338908403395
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.645753754809614
  Min value:  -1.6819253820445499
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.110793676955011
  Min value:  -7.1082338908403395
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 160s 472ms/step - loss: 0.8441 - val_loss: 0.6285 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 10s 208ms/step - loss: 0.5623 - val_loss: 0.4711 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 244ms/step - loss: 0.4862 - val_loss: 0.5405 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 249ms/step - loss: 0.4461 - val_loss: 0.5599 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 242ms/step - loss: 0.4282 - val_loss: 0.6426 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 245ms/step - loss: 0.4234 - val_loss: 0.5290 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 239ms/step - loss: 0.4157
Epoch 7: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 246ms/step - loss: 0.4070 - val_loss: 0.5497 - learning_rate: 5.0000e-04
Epoch 7: early stopping
Restoring model weights from the end of the best epoch: 2.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.69396727
1 day(s) RMSE                      : 9.48835959
1 day(s) R2                        : -0.01046663
1 day(s) Pearson r                 : 0.36704942
1 day(s) QLIKE                     : 0.36577407
3 day(s) MAE                       : 2.75971011
3 day(s) RMSE                      : 9.55402852
3 day(s) R2                        : -0.01950620
3 day(s) Pearson r                 : 0.27247095
3 day(s) QLIKE                     : 0.37428451
5 day(s) MAE                       : 2.85310445
5 day(s) RMSE                      : 9.66477620
5 day(s) R2                        : -0.03995770
5 day(s) Pearson r                 : 0.17985183
5 day(s) QLIKE                     : 0.40507111
full horizon MAE                   : 2.85310445
full horizon RMSE                  : 9.66477620
full horizon R2                    : -0.03995770
full horizon Pearson r             : 0.17985183
full horizon QLIKE                 : 0.40507111

--- Task 2 ---
1 day(s) MAE                       : 0.04761363
1 day(s) RMSE                      : 0.05878679
1 day(s) R2                        : -1.78987143
1 day(s) Pearson r                 : -0.20715884
1 day(s) QLIKE                     : 6.34240586
3 day(s) MAE                       : 0.04791912
3 day(s) RMSE                      : 0.05931054
3 day(s) R2                        : -1.83707212
3 day(s) Pearson r                 : -0.00698003
3 day(s) QLIKE                     : 12.67930482
5 day(s) MAE                       : 0.04802197
5 day(s) RMSE                      : 0.05946244
5 day(s) R2                        : -1.84980047
5 day(s) Pearson r                 : -0.00549590
5 day(s) QLIKE                     : 14.28998983
full horizon MAE                   : 0.04802197
full horizon RMSE                  : 0.05946244
full horizon R2                    : -1.84980047
full horizon Pearson r             : -0.00549590
full horizon QLIKE                 : 14.28998983

--- Task 3 ---
1 day(s) MAE                       : 0.46502720
1 day(s) RMSE                      : 0.50976105
1 day(s) R2                        : -0.65308475
1 day(s) Pearson r                 : -0.07147644
1 day(s) QLIKE                     : 8.11236399
3 day(s) MAE                       : 0.51199517
3 day(s) RMSE                      : 0.60672156
3 day(s) R2                        : -1.34410885
3 day(s) Pearson r                 : -0.00726741
3 day(s) QLIKE                     : 8.89525420
5 day(s) MAE                       : 0.52587318
5 day(s) RMSE                      : 0.63448540
5 day(s) R2                        : -1.56563682
5 day(s) Pearson r                 : -0.00232908
5 day(s) QLIKE                     : 9.56655877
full horizon MAE                   : 0.52587318
full horizon RMSE                  : 0.63448540
full horizon R2                    : -1.56563682
full horizon Pearson r             : -0.00232908
full horizon QLIKE                 : 9.56655877

--- Task 4 ---
1 day(s) MAE                       : 0.89898252
1 day(s) RMSE                      : 1.24035384
1 day(s) R2                        : -0.93429804
1 day(s) Pearson r                 : -0.06987622
1 day(s) QLIKE                     : 0.75891139
3 day(s) MAE                       : 0.91167226
3 day(s) RMSE                      : 1.25578999
3 day(s) R2                        : -0.98671839
3 day(s) Pearson r                 : -0.00645656
3 day(s) QLIKE                     : 0.76808254
5 day(s) MAE                       : 0.90780847
5 day(s) RMSE                      : 1.24942050
5 day(s) R2                        : -0.97028362
5 day(s) Pearson r                 : -0.01572968
5 day(s) QLIKE                     : 0.80512244
full horizon MAE                   : 0.90780847
full horizon RMSE                  : 1.24942050
full horizon R2                    : -0.97028362
full horizon Pearson r             : -0.01572968
full horizon QLIKE                 : 0.80512244

--- Task 5 ---
1 day(s) MAE                       : 0.09541525
1 day(s) RMSE                      : 0.11306706
1 day(s) R2                        : -2.47258008
1 day(s) Pearson r                 : 0.01627748
1 day(s) QLIKE                     : 2.98989303
3 day(s) MAE                       : 0.09554367
3 day(s) RMSE                      : 0.11304494
3 day(s) R2                        : -2.48938170
3 day(s) Pearson r                 : -0.03638540
3 day(s) QLIKE                     : 5.49485270
5 day(s) MAE                       : 0.08704359
5 day(s) RMSE                      : 0.10455213
5 day(s) R2                        : -1.99958867
5 day(s) Pearson r                 : -0.04870840
5 day(s) QLIKE                     : 18.76738862
full horizon MAE                   : 0.08704359
full horizon RMSE                  : 0.10455213
full horizon R2                    : -1.99958867
full horizon Pearson r             : -0.04870840
full horizon QLIKE                 : 18.76738862

--- Task 6 ---
1 day(s) MAE                       : 2.45225463
1 day(s) RMSE                      : 2.98823339
1 day(s) R2                        : -0.12849736
1 day(s) Pearson r                 : -0.15384484
1 day(s) QLIKE                     : 0.07527270
3 day(s) MAE                       : 2.39435286
3 day(s) RMSE                      : 3.11571541
3 day(s) R2                        : -0.22293928
3 day(s) Pearson r                 : -0.12518770
3 day(s) QLIKE                     : 0.08531419
5 day(s) MAE                       : 2.37420276
5 day(s) RMSE                      : 3.25329064
5 day(s) R2                        : -0.32906203
5 day(s) Pearson r                 : -0.12376945
5 day(s) QLIKE                     : 0.09381364
full horizon MAE                   : 2.37420276
full horizon RMSE                  : 3.25329064
full horizon R2                    : -0.32906203
full horizon Pearson r             : -0.12376945
full horizon QLIKE                 : 0.09381364

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.90066, max=5.50519

=== C | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.258708812864733
  Min value:  -7.108083650974834
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6439291882044116
  Min value:  -1.6744596134591672
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.108415972443237
  Min value:  -7.108083650974834
Epoch 001 | phase=1 | train_loss=5.1072 | val_main=0.475461
Epoch 002 | phase=1 | train_loss=4.8907 | val_main=0.475508
Epoch 003 | phase=1 | train_loss=4.9400 | val_main=0.475549
Epoch 004 | phase=1 | train_loss=4.7280 | val_main=0.475528
Epoch 005 | phase=1 | train_loss=4.5189 | val_main=0.475489
Epoch 006 | phase=1 | train_loss=4.5013 | val_main=0.475496
Epoch 007 | phase=1 | train_loss=4.4361 | val_main=0.475449
Epoch 008 | phase=1 | train_loss=4.3720 | val_main=0.475415
Epoch 009 | phase=1 | train_loss=4.2968 | val_main=0.475425
Epoch 010 | phase=1 | train_loss=4.2373 | val_main=0.475434
Epoch 011 | phase=1 | train_loss=4.1423 | val_main=0.475399
Epoch 012 | phase=1 | train_loss=4.1129 | val_main=0.475491
Epoch 013 | phase=1 | train_loss=3.8901 | val_main=0.475497
Epoch 014 | phase=1 | train_loss=3.8268 | val_main=0.475574
Epoch 015 | phase=1 | train_loss=3.7647 | val_main=0.475539
Epoch 016 | phase=0 | train_loss=4.7099 | val_main=0.468047
Epoch 017 | phase=0 | train_loss=4.4455 | val_main=0.447530
Epoch 018 | phase=0 | train_loss=4.3422 | val_main=0.432141
Epoch 019 | phase=0 | train_loss=4.0866 | val_main=0.481243
Epoch 020 | phase=0 | train_loss=3.9383 | val_main=0.461131
Epoch 021 | phase=0 | train_loss=3.7340 | val_main=0.446223
Epoch 022 | phase=0 | train_loss=3.6728 | val_main=0.469967
Epoch 023 | phase=0 | train_loss=3.7023 | val_main=0.474429
Epoch 024 | phase=0 | train_loss=3.4589 | val_main=0.488177
Epoch 025 | phase=0 | train_loss=3.3468 | val_main=0.491572
Epoch 026 | phase=0 | train_loss=3.3162 | val_main=0.495334
Epoch 027 | phase=0 | train_loss=3.1559 | val_main=0.501951
Epoch 028 | phase=0 | train_loss=3.0838 | val_main=0.504740
Epoch 029 | phase=0 | train_loss=2.9824 | val_main=0.508911
Epoch 030 | phase=0 | train_loss=2.8697 | val_main=0.516703
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.75687601
1 day(s) RMSE                      : 9.41009338
1 day(s) R2                        : 0.00613460
1 day(s) Pearson r                 : 0.30048067
1 day(s) QLIKE                     : 0.37109330
3 day(s) MAE                       : 2.80449502
3 day(s) RMSE                      : 9.49034508
3 day(s) R2                        : -0.00596023
3 day(s) Pearson r                 : 0.22781027
3 day(s) QLIKE                     : 0.39325458
5 day(s) MAE                       : 2.84439045
5 day(s) RMSE                      : 9.55325999
5 day(s) R2                        : -0.01609723
5 day(s) Pearson r                 : 0.15825624
5 day(s) QLIKE                     : 0.41146128
10 day(s) MAE                      : 2.88794081
10 day(s) RMSE                     : 9.61866700
10 day(s) R2                       : -0.02765411
10 day(s) Pearson r                : 0.08317723
10 day(s) QLIKE                    : 0.42968427
full horizon MAE                   : 2.88794081
full horizon RMSE                  : 9.61866700
full horizon R2                    : -0.02765411
full horizon Pearson r             : 0.08317723
full horizon QLIKE                 : 0.42968427

--- Task 2 ---
1 day(s) MAE                       : 0.04285748
1 day(s) RMSE                      : 0.05380755
1 day(s) R2                        : -1.33728187
1 day(s) Pearson r                 : 0.13992955
1 day(s) QLIKE                     : 10.80045566
3 day(s) MAE                       : 0.04387259
3 day(s) RMSE                      : 0.05483868
3 day(s) R2                        : -1.42538486
3 day(s) Pearson r                 : 0.12564812
3 day(s) QLIKE                     : 11.18085511
5 day(s) MAE                       : 0.04401007
5 day(s) RMSE                      : 0.05495928
5 day(s) R2                        : -1.43450739
5 day(s) Pearson r                 : 0.12259989
5 day(s) QLIKE                     : 11.25653447
10 day(s) MAE                      : 0.04410025
10 day(s) RMSE                     : 0.05496162
10 day(s) R2                       : -1.42859963
10 day(s) Pearson r                : 0.12820534
10 day(s) QLIKE                    : 11.24008313
full horizon MAE                   : 0.04410025
full horizon RMSE                  : 0.05496162
full horizon R2                    : -1.42859963
full horizon Pearson r             : 0.12820534
full horizon QLIKE                 : 11.24008313

--- Task 3 ---
1 day(s) MAE                       : 0.35944364
1 day(s) RMSE                      : 0.46567036
1 day(s) R2                        : -0.37949150
1 day(s) Pearson r                 : 0.01510768
1 day(s) QLIKE                     : 8.11134702
3 day(s) MAE                       : 0.35481170
3 day(s) RMSE                      : 0.46289419
3 day(s) R2                        : -0.36446495
3 day(s) Pearson r                 : 0.07260277
3 day(s) QLIKE                     : 8.10500204
5 day(s) MAE                       : 0.35363441
5 day(s) RMSE                      : 0.45932601
5 day(s) R2                        : -0.34460277
5 day(s) Pearson r                 : 0.09887697
5 day(s) QLIKE                     : 8.10300449
10 day(s) MAE                      : 0.35192149
10 day(s) RMSE                     : 0.45631530
10 day(s) R2                       : -0.32973092
10 day(s) Pearson r                : 0.10138083
10 day(s) QLIKE                    : 8.10233020
full horizon MAE                   : 0.35192149
full horizon RMSE                  : 0.45631530
full horizon R2                    : -0.32973092
full horizon Pearson r             : 0.10138083
full horizon QLIKE                 : 8.10233020

--- Task 4 ---
1 day(s) MAE                       : 0.89333479
1 day(s) RMSE                      : 1.16822661
1 day(s) R2                        : -0.71587794
1 day(s) Pearson r                 : 0.00518326
1 day(s) QLIKE                     : 1.83567341
3 day(s) MAE                       : 0.89323022
3 day(s) RMSE                      : 1.16757208
3 day(s) R2                        : -0.71739295
3 day(s) Pearson r                 : 0.01636380
3 day(s) QLIKE                     : 1.80769906
5 day(s) MAE                       : 0.89407502
5 day(s) RMSE                      : 1.16809848
5 day(s) R2                        : -0.72214776
5 day(s) Pearson r                 : 0.02183845
5 day(s) QLIKE                     : 1.77303893
10 day(s) MAE                      : 0.89575092
10 day(s) RMSE                     : 1.17222413
10 day(s) R2                       : -0.74156892
10 day(s) Pearson r                : -0.00264865
10 day(s) QLIKE                    : 1.83906240
full horizon MAE                   : 0.89575092
full horizon RMSE                  : 1.17222413
full horizon R2                    : -0.74156892
full horizon Pearson r             : -0.00264865
full horizon QLIKE                 : 1.83906240

--- Task 5 ---
1 day(s) MAE                       : 0.09261480
1 day(s) RMSE                      : 0.10970537
1 day(s) R2                        : -2.26915754
1 day(s) Pearson r                 : -0.11753302
1 day(s) QLIKE                     : 16.22674390
3 day(s) MAE                       : 0.09286057
3 day(s) RMSE                      : 0.10994249
3 day(s) R2                        : -2.30048196
3 day(s) Pearson r                 : -0.09434213
3 day(s) QLIKE                     : 16.17299012
5 day(s) MAE                       : 0.09315099
5 day(s) RMSE                      : 0.11020612
5 day(s) R2                        : -2.33278530
5 day(s) Pearson r                 : -0.07041597
5 day(s) QLIKE                     : 16.13134171
10 day(s) MAE                      : 0.09414490
10 day(s) RMSE                     : 0.11105979
10 day(s) R2                       : -2.42566862
10 day(s) Pearson r                : -0.04529818
10 day(s) QLIKE                    : 15.93317347
full horizon MAE                   : 0.09414490
full horizon RMSE                  : 0.11105979
full horizon R2                    : -2.42566862
full horizon Pearson r             : -0.04529818
full horizon QLIKE                 : 15.93317347

--- Task 6 ---
1 day(s) MAE                       : 8.57915605
1 day(s) RMSE                      : 16.84642485
1 day(s) R2                        : -34.86633635
1 day(s) Pearson r                 : -0.02953881
1 day(s) QLIKE                     : 0.23078084
3 day(s) MAE                       : 8.81805706
3 day(s) RMSE                      : 17.42709524
3 day(s) R2                        : -37.25950437
3 day(s) Pearson r                 : -0.03391761
3 day(s) QLIKE                     : 0.23319892
5 day(s) MAE                       : 8.86578581
5 day(s) RMSE                      : 17.51997630
5 day(s) R2                        : -37.54486714
5 day(s) Pearson r                 : -0.03647334
5 day(s) QLIKE                     : 0.23257488
10 day(s) MAE                      : 8.77599253
10 day(s) RMSE                     : 17.17604598
10 day(s) R2                       : -35.79102513
10 day(s) Pearson r                : -0.03597465
10 day(s) QLIKE                    : 0.22894757
full horizon MAE                   : 8.77599253
full horizon RMSE                  : 17.17604598
full horizon R2                    : -35.79102513
full horizon Pearson r             : -0.03597465
full horizon QLIKE                 : 0.22894757

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_H10.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=2.28983, max=7.66594

=== C | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.258708812864733
  Min value:  -7.108083650974834
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.6439291882044116
  Min value:  -1.6744596134591672
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.108415972443237
  Min value:  -7.108083650974834
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 425s 404ms/step - loss: 0.8125 - val_loss: 0.5196 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 11s 246ms/step - loss: 0.4910 - val_loss: 0.5119 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 325ms/step - loss: 0.4164 - val_loss: 0.7699 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 320ms/step - loss: 0.3869 - val_loss: 0.7491 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 316ms/step - loss: 0.3731 - val_loss: 0.8067 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 315ms/step - loss: 0.3578 - val_loss: 0.7559 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 308ms/step - loss: 0.3544
Epoch 7: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 15s 317ms/step - loss: 0.3418 - val_loss: 0.7327 - learning_rate: 5.0000e-04
Epoch 7: early stopping
Restoring model weights from the end of the best epoch: 2.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.86444877
1 day(s) RMSE                      : 9.50768393
1 day(s) R2                        : -0.01458673
1 day(s) Pearson r                 : 0.43263680
1 day(s) QLIKE                     : 0.42243525
3 day(s) MAE                       : 2.88638044
3 day(s) RMSE                      : 9.53737094
3 day(s) R2                        : -0.01595425
3 day(s) Pearson r                 : 0.10195349
3 day(s) QLIKE                     : 0.42391406
5 day(s) MAE                       : 2.87337046
5 day(s) RMSE                      : 9.59963357
5 day(s) R2                        : -0.02598588
5 day(s) Pearson r                 : 0.04277518
5 day(s) QLIKE                     : 0.43240128
10 day(s) MAE                      : 2.93984828
10 day(s) RMSE                     : 9.73968935
10 day(s) R2                       : -0.05367674
10 day(s) Pearson r                : 0.01033502
10 day(s) QLIKE                    : 0.45746464
full horizon MAE                   : 2.93984828
full horizon RMSE                  : 9.73968935
full horizon R2                    : -0.05367674
full horizon Pearson r             : 0.01033502
full horizon QLIKE                 : 0.45746464

--- Task 2 ---
1 day(s) MAE                       : 0.04784451
1 day(s) RMSE                      : 0.05922374
1 day(s) R2                        : -1.83149804
1 day(s) Pearson r                 : 0.10112701
1 day(s) QLIKE                     : 6.32539975
3 day(s) MAE                       : 0.04801034
3 day(s) RMSE                      : 0.05948061
3 day(s) R2                        : -1.85336630
3 day(s) Pearson r                 : 0.00977339
3 day(s) QLIKE                     : 12.80200919
5 day(s) MAE                       : 0.04807670
5 day(s) RMSE                      : 0.05956428
5 day(s) R2                        : -1.85957073
5 day(s) Pearson r                 : 0.00623712
5 day(s) QLIKE                     : 12.16040223
10 day(s) MAE                      : 0.04820381
10 day(s) RMSE                     : 0.05971062
10 day(s) R2                       : -1.86642172
10 day(s) Pearson r                : 0.00307702
10 day(s) QLIKE                    : 9.65534967
full horizon MAE                   : 0.04820381
full horizon RMSE                  : 0.05971062
full horizon R2                    : -1.86642172
full horizon Pearson r             : 0.00307702
full horizon QLIKE                 : 9.65534967

--- Task 3 ---
1 day(s) MAE                       : 0.40512963
1 day(s) RMSE                      : 0.42586146
1 day(s) R2                        : -0.15371489
1 day(s) Pearson r                 : 0.04719178
1 day(s) QLIKE                     : 8.10475013
3 day(s) MAE                       : 0.38944571
3 day(s) RMSE                      : 0.41317065
3 day(s) R2                        : -0.08707088
3 day(s) Pearson r                 : -0.00855839
3 day(s) QLIKE                     : 8.10886338
5 day(s) MAE                       : 0.40165718
5 day(s) RMSE                      : 0.42833697
5 day(s) R2                        : -0.16929218
5 day(s) Pearson r                 : -0.00356796
5 day(s) QLIKE                     : 8.12746397
10 day(s) MAE                      : 0.46473319
10 day(s) RMSE                     : 0.54422583
10 day(s) R2                       : -0.89143748
10 day(s) Pearson r                : 0.00320659
10 day(s) QLIKE                    : 11.65067107
full horizon MAE                   : 0.46473319
full horizon RMSE                  : 0.54422583
full horizon R2                    : -0.89143748
full horizon Pearson r             : 0.00320659
full horizon QLIKE                 : 11.65067107

--- Task 4 ---
1 day(s) MAE                       : 0.90721581
1 day(s) RMSE                      : 1.25122170
1 day(s) R2                        : -0.96834280
1 day(s) Pearson r                 : -0.08311230
1 day(s) QLIKE                     : 0.75900153
3 day(s) MAE                       : 0.89455100
3 day(s) RMSE                      : 1.23283106
3 day(s) R2                        : -0.91473824
3 day(s) Pearson r                 : -0.00234118
3 day(s) QLIKE                     : 0.77200297
5 day(s) MAE                       : 0.89424917
5 day(s) RMSE                      : 1.23100686
5 day(s) R2                        : -0.91263651
5 day(s) Pearson r                 : -0.00378255
5 day(s) QLIKE                     : 0.76566025
10 day(s) MAE                      : 0.88722795
10 day(s) RMSE                     : 1.21705750
10 day(s) R2                       : -0.87733401
10 day(s) Pearson r                : -0.02560954
10 day(s) QLIKE                    : 0.78229871
full horizon MAE                   : 0.88722795
full horizon RMSE                  : 1.21705750
full horizon R2                    : -0.87733401
full horizon Pearson r             : -0.02560954
full horizon QLIKE                 : 0.78229871

--- Task 5 ---
1 day(s) MAE                       : 0.09537291
1 day(s) RMSE                      : 0.11302090
1 day(s) R2                        : -2.46974543
1 day(s) Pearson r                 : 0.15699271
1 day(s) QLIKE                     : 2.97427285
3 day(s) MAE                       : 0.09433927
3 day(s) RMSE                      : 0.11173776
3 day(s) R2                        : -2.40915004
3 day(s) Pearson r                 : 0.00111850
3 day(s) QLIKE                     : 4.07047820
5 day(s) MAE                       : 0.09020567
5 day(s) RMSE                      : 0.10706623
5 day(s) R2                        : -2.14558145
5 day(s) Pearson r                 : 0.01019528
5 day(s) QLIKE                     : 4.75006086
10 day(s) MAE                      : 0.07386198
10 day(s) RMSE                     : 0.08982772
10 day(s) R2                       : -1.24105436
10 day(s) Pearson r                : 0.01710720
10 day(s) QLIKE                    : 5.56723877
full horizon MAE                   : 0.07386198
full horizon RMSE                  : 0.08982772
full horizon R2                    : -1.24105436
full horizon Pearson r             : 0.01710720
full horizon QLIKE                 : 5.56723877

--- Task 6 ---
1 day(s) MAE                       : 2.56234880
1 day(s) RMSE                      : 3.00519508
1 day(s) R2                        : -0.14134478
1 day(s) Pearson r                 : -0.14606834
1 day(s) QLIKE                     : 0.07121525
3 day(s) MAE                       : 2.62162212
3 day(s) RMSE                      : 3.06274207
3 day(s) R2                        : -0.18170801
3 day(s) Pearson r                 : -0.10533276
3 day(s) QLIKE                     : 0.07260597
5 day(s) MAE                       : 2.58102661
5 day(s) RMSE                      : 3.04259337
5 day(s) R2                        : -0.16248499
5 day(s) Pearson r                 : -0.07014008
5 day(s) QLIKE                     : 0.07376350
10 day(s) MAE                      : 2.37507478
10 day(s) RMSE                     : 3.07596353
10 day(s) R2                       : -0.17993440
10 day(s) Pearson r                : -0.03443858
10 day(s) QLIKE                    : 0.08944535
full horizon MAE                   : 2.37507478
full horizon RMSE                  : 3.07596353
full horizon R2                    : -0.17993440
full horizon Pearson r             : -0.03443858
full horizon QLIKE                 : 0.08944535

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=1.9107, max=4.20924

=== C | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.261857345977599
  Min value:  -7.10781787362285
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.643530346347732
  Min value:  -1.6643841678427629
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.108493380986118
  Min value:  -7.10781787362285
Epoch 001 | phase=1 | train_loss=5.0594 | val_main=0.477759
Epoch 002 | phase=1 | train_loss=4.9460 | val_main=0.477816
Epoch 003 | phase=1 | train_loss=4.9455 | val_main=0.477896
Epoch 004 | phase=1 | train_loss=4.8750 | val_main=0.477904
Epoch 005 | phase=1 | train_loss=4.6277 | val_main=0.477783
Epoch 006 | phase=1 | train_loss=4.5487 | val_main=0.477868
Epoch 007 | phase=1 | train_loss=4.4581 | val_main=0.477752
Epoch 008 | phase=1 | train_loss=4.4056 | val_main=0.477741
Epoch 009 | phase=1 | train_loss=4.3175 | val_main=0.477828
Epoch 010 | phase=1 | train_loss=4.2595 | val_main=0.477648
Epoch 011 | phase=1 | train_loss=4.1199 | val_main=0.477672
Epoch 012 | phase=1 | train_loss=4.1041 | val_main=0.477792
Epoch 013 | phase=1 | train_loss=3.8882 | val_main=0.477751
Epoch 014 | phase=1 | train_loss=3.7979 | val_main=0.477913
Epoch 015 | phase=1 | train_loss=3.6843 | val_main=0.477831
Epoch 016 | phase=0 | train_loss=4.5835 | val_main=0.472573
Epoch 017 | phase=0 | train_loss=4.3468 | val_main=0.454521
Epoch 018 | phase=0 | train_loss=4.2603 | val_main=0.440720
Epoch 019 | phase=0 | train_loss=3.9442 | val_main=0.515534
Epoch 020 | phase=0 | train_loss=3.7751 | val_main=0.500241
Epoch 021 | phase=0 | train_loss=3.6935 | val_main=0.506281
Epoch 022 | phase=0 | train_loss=3.6109 | val_main=0.523840
Epoch 023 | phase=0 | train_loss=3.6362 | val_main=0.526603
Epoch 024 | phase=0 | train_loss=3.4111 | val_main=0.543340
Epoch 025 | phase=0 | train_loss=3.2900 | val_main=0.550049
Epoch 026 | phase=0 | train_loss=3.3329 | val_main=0.568856
Epoch 027 | phase=0 | train_loss=3.1979 | val_main=0.574094
Epoch 028 | phase=0 | train_loss=3.1224 | val_main=0.572261
Epoch 029 | phase=0 | train_loss=3.0316 | val_main=0.588008
Epoch 030 | phase=0 | train_loss=2.9750 | val_main=0.588131
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.79908752
1 day(s) RMSE                      : 9.36745275
1 day(s) R2                        : 0.01512134
1 day(s) Pearson r                 : 0.27770720
1 day(s) QLIKE                     : 0.37179724
3 day(s) MAE                       : 2.84848915
3 day(s) RMSE                      : 9.47442347
3 day(s) R2                        : -0.00258773
3 day(s) Pearson r                 : 0.20221246
3 day(s) QLIKE                     : 0.39717069
5 day(s) MAE                       : 2.88421330
5 day(s) RMSE                      : 9.52867787
5 day(s) R2                        : -0.01087478
5 day(s) Pearson r                 : 0.14752651
5 day(s) QLIKE                     : 0.41218318
10 day(s) MAE                      : 2.92601498
10 day(s) RMSE                     : 9.59210727
10 day(s) R2                       : -0.02198668
10 day(s) Pearson r                : 0.07916104
10 day(s) QLIKE                    : 0.42880035
20 day(s) MAE                      : 2.95265978
20 day(s) RMSE                     : 9.62752967
20 day(s) R2                       : -0.02873320
20 day(s) Pearson r                : 0.03453393
20 day(s) QLIKE                    : 0.43681860
full horizon MAE                   : 2.95265978
full horizon RMSE                  : 9.62752967
full horizon R2                    : -0.02873320
full horizon Pearson r             : 0.03453393
full horizon QLIKE                 : 0.43681860

--- Task 2 ---
1 day(s) MAE                       : 0.04605167
1 day(s) RMSE                      : 0.05652458
1 day(s) R2                        : -1.57928508
1 day(s) Pearson r                 : 0.10114606
1 day(s) QLIKE                     : 10.36176947
3 day(s) MAE                       : 0.04684801
3 day(s) RMSE                      : 0.05848394
3 day(s) R2                        : -1.75854442
3 day(s) Pearson r                 : 0.08757062
3 day(s) QLIKE                     : 10.46015496
5 day(s) MAE                       : 0.04657109
5 day(s) RMSE                      : 0.05822069
5 day(s) R2                        : -1.73201879
5 day(s) Pearson r                 : 0.08097165
5 day(s) QLIKE                     : 10.58860050
10 day(s) MAE                      : 0.04539091
10 day(s) RMSE                     : 0.05677932
10 day(s) R2                       : -1.59189465
10 day(s) Pearson r                : 0.10163745
10 day(s) QLIKE                    : 10.91881043
20 day(s) MAE                      : 0.04314326
20 day(s) RMSE                     : 0.05420691
20 day(s) R2                       : -1.36370895
20 day(s) Pearson r                : 0.12765531
20 day(s) QLIKE                    : 11.26722821
full horizon MAE                   : 0.04314326
full horizon RMSE                  : 0.05420691
full horizon R2                    : -1.36370895
full horizon Pearson r             : 0.12765531
full horizon QLIKE                 : 11.26722821

--- Task 3 ---
1 day(s) MAE                       : 0.34618562
1 day(s) RMSE                      : 0.45123722
1 day(s) R2                        : -0.29530386
1 day(s) Pearson r                 : -0.00343574
1 day(s) QLIKE                     : 8.10848135
3 day(s) MAE                       : 0.35105368
3 day(s) RMSE                      : 0.46069360
3 day(s) R2                        : -0.35152252
3 day(s) Pearson r                 : 0.05303426
3 day(s) QLIKE                     : 8.10582277
5 day(s) MAE                       : 0.35050834
5 day(s) RMSE                      : 0.45738129
5 day(s) R2                        : -0.33324117
5 day(s) Pearson r                 : 0.07990378
5 day(s) QLIKE                     : 8.10440189
10 day(s) MAE                      : 0.34691906
10 day(s) RMSE                     : 0.45145800
10 day(s) R2                       : -0.30157263
10 day(s) Pearson r                : 0.11194583
10 day(s) QLIKE                    : 8.10167867
20 day(s) MAE                      : 0.34642497
20 day(s) RMSE                     : 0.45203979
20 day(s) R2                       : -0.30579113
20 day(s) Pearson r                : 0.07863451
20 day(s) QLIKE                    : 8.10536868
full horizon MAE                   : 0.34642497
full horizon RMSE                  : 0.45203979
full horizon R2                    : -0.30579113
full horizon Pearson r             : 0.07863451
full horizon QLIKE                 : 8.10536868

--- Task 4 ---
1 day(s) MAE                       : 0.87579439
1 day(s) RMSE                      : 1.14645948
1 day(s) R2                        : -0.65253102
1 day(s) Pearson r                 : 0.11281698
1 day(s) QLIKE                     : 1.59474037
3 day(s) MAE                       : 0.87554933
3 day(s) RMSE                      : 1.14452791
3 day(s) R2                        : -0.65027018
3 day(s) Pearson r                 : 0.11678296
3 day(s) QLIKE                     : 1.54145219
5 day(s) MAE                       : 0.87547930
5 day(s) RMSE                      : 1.14225321
5 day(s) R2                        : -0.64678261
5 day(s) Pearson r                 : 0.11759082
5 day(s) QLIKE                     : 1.52873657
10 day(s) MAE                      : 0.87793399
10 day(s) RMSE                     : 1.14489433
10 day(s) R2                       : -0.66130801
10 day(s) Pearson r                : 0.07942450
10 day(s) QLIKE                    : 1.56586619
20 day(s) MAE                      : 0.87281511
20 day(s) RMSE                     : 1.14512376
20 day(s) R2                       : -0.67091998
20 day(s) Pearson r                : 0.06729956
20 day(s) QLIKE                    : 1.54162125
full horizon MAE                   : 0.87281511
full horizon RMSE                  : 1.14512376
full horizon R2                    : -0.67091998
full horizon Pearson r             : 0.06729956
full horizon QLIKE                 : 1.54162125

--- Task 5 ---
1 day(s) MAE                       : 0.09181071
1 day(s) RMSE                      : 0.10881783
1 day(s) R2                        : -2.21647542
1 day(s) Pearson r                 : -0.00702690
1 day(s) QLIKE                     : 16.41001066
3 day(s) MAE                       : 0.09222536
3 day(s) RMSE                      : 0.10939793
3 day(s) R2                        : -2.26786745
3 day(s) Pearson r                 : -0.01956715
3 day(s) QLIKE                     : 16.35031574
5 day(s) MAE                       : 0.09282414
5 day(s) RMSE                      : 0.10988344
5 day(s) R2                        : -2.31329749
5 day(s) Pearson r                 : -0.02245796
5 day(s) QLIKE                     : 16.17676682
10 day(s) MAE                      : 0.09391447
10 day(s) RMSE                     : 0.11079072
10 day(s) R2                       : -2.40908960
10 day(s) Pearson r                : -0.00201191
10 day(s) QLIKE                    : 15.88844059
20 day(s) MAE                      : 0.09602308
20 day(s) RMSE                     : 0.11244777
20 day(s) R2                       : -2.59890523
20 day(s) Pearson r                : 0.01209877
20 day(s) QLIKE                    : 15.31222530
full horizon MAE                   : 0.09602308
full horizon RMSE                  : 0.11244777
full horizon R2                    : -2.59890523
full horizon Pearson r             : 0.01209877
full horizon QLIKE                 : 15.31222530

--- Task 6 ---
1 day(s) MAE                       : 14.52232725
1 day(s) RMSE                      : 26.58473431
1 day(s) R2                        : -88.31741962
1 day(s) Pearson r                 : -0.07910425
1 day(s) QLIKE                     : 0.38653082
3 day(s) MAE                       : 14.35607613
3 day(s) RMSE                      : 26.74119586
3 day(s) R2                        : -89.08468829
3 day(s) Pearson r                 : -0.07648542
3 day(s) QLIKE                     : 0.38099025
5 day(s) MAE                       : 14.52241238
5 day(s) RMSE                      : 27.26163423
5 day(s) R2                        : -92.32613869
5 day(s) Pearson r                 : -0.07416201
5 day(s) QLIKE                     : 0.38225422
10 day(s) MAE                      : 14.94328514
10 day(s) RMSE                     : 28.05476513
10 day(s) R2                       : -97.15417453
10 day(s) Pearson r                : -0.06918519
10 day(s) QLIKE                    : 0.38613410
20 day(s) MAE                      : 14.32650769
20 day(s) RMSE                     : 26.29399743
20 day(s) R2                       : -83.98417017
20 day(s) Pearson r                : -0.06566913
20 day(s) QLIKE                    : 0.37112246
full horizon MAE                   : 14.32650769
full horizon RMSE                  : 26.29399743
full horizon R2                    : -83.98417017
full horizon Pearson r             : -0.06566913
full horizon QLIKE                 : 0.37112246

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_H20.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=2.25159, max=7.9084

=== C | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2018
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1453, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  21.8091406863669
  Min value:  -0.37248441284063266
Checking X_time_train_core:
Shape: (1453, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.800915059933457
Checking y_train_core (log_mse scaled):
Shape: (1453, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  5.261857345977599
  Min value:  -7.10781787362285
Checking X_price_val:
Shape: (161, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.018765610501212
  Min value:  -0.35593837785433546
Checking X_time_val:
Shape: (161, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5949981610828599
Checking y_val (log_mse scaled):
Shape: (161, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.643530346347732
  Min value:  -1.6643841678427629
Checking X_price_test:
Shape: (404, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.615585669042774
  Min value:  -0.3593653801213122
Checking X_time_test:
Shape: (404, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.481940465687973
  Min value:  -1.5797626144150565
Checking y_test (log_mse scaled):
Shape: (404, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.108493380986118
  Min value:  -7.10781787362285
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 109s 517ms/step - loss: 0.8023 - val_loss: 0.4728 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 14s 313ms/step - loss: 0.4596 - val_loss: 0.4865 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 455ms/step - loss: 0.3820 - val_loss: 0.7147 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 22s 477ms/step - loss: 0.3502 - val_loss: 0.7938 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 22s 486ms/step - loss: 0.3296 - val_loss: 0.8101 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
46/46 ━━━━━━━━━━━━━━━━━━━━ 0s 438ms/step - loss: 0.3231
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
46/46 ━━━━━━━━━━━━━━━━━━━━ 21s 451ms/step - loss: 0.3248 - val_loss: 0.8366 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 2.85991588
1 day(s) RMSE                      : 9.52586378
1 day(s) R2                        : -0.01847047
1 day(s) Pearson r                 : -0.38635034
1 day(s) QLIKE                     : 0.43025248
3 day(s) MAE                       : 2.90890065
3 day(s) RMSE                      : 9.54555235
3 day(s) R2                        : -0.01769802
3 day(s) Pearson r                 : 0.01181776
3 day(s) QLIKE                     : 0.43198048
5 day(s) MAE                       : 2.91443877
5 day(s) RMSE                      : 9.57446994
5 day(s) R2                        : -0.02061407
5 day(s) Pearson r                 : 0.00474146
5 day(s) QLIKE                     : 0.43427245
10 day(s) MAE                      : 2.92045005
10 day(s) RMSE                     : 9.60115429
10 day(s) R2                       : -0.02391541
10 day(s) Pearson r                : 0.00307696
10 day(s) QLIKE                    : 0.43474130
20 day(s) MAE                      : 2.93472500
20 day(s) RMSE                     : 9.60854443
20 day(s) R2                       : -0.02467993
20 day(s) Pearson r                : 0.00462733
20 day(s) QLIKE                    : 0.43277833
full horizon MAE                   : 2.93472500
full horizon RMSE                  : 9.60854443
full horizon R2                    : -0.02467993
full horizon Pearson r             : 0.00462733
full horizon QLIKE                 : 0.43277833

--- Task 2 ---
1 day(s) MAE                       : 0.04790468
1 day(s) RMSE                      : 0.05933481
1 day(s) R2                        : -1.84212916
1 day(s) Pearson r                 : 0.15491253
1 day(s) QLIKE                     : 6.32586953
3 day(s) MAE                       : 0.04803090
3 day(s) RMSE                      : 0.05951759
3 day(s) R2                        : -1.85691506
3 day(s) Pearson r                 : 0.00156028
3 day(s) QLIKE                     : 10.92491631
5 day(s) MAE                       : 0.04808903
5 day(s) RMSE                      : 0.05958644
5 day(s) R2                        : -1.86169863
5 day(s) Pearson r                 : 0.00042791
5 day(s) QLIKE                     : 11.78791560
10 day(s) MAE                      : 0.04820997
10 day(s) RMSE                     : 0.05972167
10 day(s) R2                       : -1.86748299
10 day(s) Pearson r                : -0.00081694
10 day(s) QLIKE                    : 10.41373059
20 day(s) MAE                      : 0.04821613
20 day(s) RMSE                     : 0.05972639
20 day(s) R2                       : -1.86957323
20 day(s) Pearson r                : -0.00056575
20 day(s) QLIKE                    : 8.77163221
full horizon MAE                   : 0.04821613
full horizon RMSE                  : 0.05972639
full horizon R2                    : -1.86957323
full horizon Pearson r             : -0.00056575
full horizon QLIKE                 : 8.77163221

--- Task 3 ---
1 day(s) MAE                       : 0.44311935
1 day(s) RMSE                      : 0.47420316
1 day(s) R2                        : -0.43050940
1 day(s) Pearson r                 : 0.00018964
1 day(s) QLIKE                     : 8.10491166
3 day(s) MAE                       : 0.44568900
3 day(s) RMSE                      : 0.47805818
3 day(s) R2                        : -0.45532647
3 day(s) Pearson r                 : -0.02941107
3 day(s) QLIKE                     : 8.10606462
5 day(s) MAE                       : 0.44383446
5 day(s) RMSE                      : 0.47558486
5 day(s) R2                        : -0.44147780
5 day(s) Pearson r                 : -0.02047093
5 day(s) QLIKE                     : 8.10680892
10 day(s) MAE                      : 0.41295804
10 day(s) RMSE                     : 0.44832361
10 day(s) R2                       : -0.28356218
10 day(s) Pearson r                : 0.01227319
10 day(s) QLIKE                    : 8.14947327
20 day(s) MAE                      : 0.38520061
20 day(s) RMSE                     : 0.44414584
20 day(s) R2                       : -0.26058339
20 day(s) Pearson r                : 0.00192712
20 day(s) QLIKE                    : 8.30886586
full horizon MAE                   : 0.38520061
full horizon RMSE                  : 0.44414584
full horizon R2                    : -0.26058339
full horizon Pearson r             : 0.00192712
full horizon QLIKE                 : 8.30886586

--- Task 4 ---
1 day(s) MAE                       : 0.91484698
1 day(s) RMSE                      : 1.26119804
1 day(s) R2                        : -0.99985624
1 day(s) Pearson r                 : -0.00430465
1 day(s) QLIKE                     : 0.75853718
3 day(s) MAE                       : 0.92830664
3 day(s) RMSE                      : 1.27501590
3 day(s) R2                        : -1.04801664
3 day(s) Pearson r                 : 0.00528653
3 day(s) QLIKE                     : 0.79244523
5 day(s) MAE                       : 0.93973464
5 day(s) RMSE                      : 1.28599246
5 day(s) R2                        : -1.08731664
5 day(s) Pearson r                 : 0.00363529
5 day(s) QLIKE                     : 0.80835937
10 day(s) MAE                      : 0.96529254
10 day(s) RMSE                     : 1.30750444
10 day(s) R2                       : -1.16673448
10 day(s) Pearson r                : -0.00121205
10 day(s) QLIKE                    : 1.13854302
20 day(s) MAE                      : 0.98846781
20 day(s) RMSE                     : 1.32541954
20 day(s) R2                       : -1.23850227
20 day(s) Pearson r                : -0.00482949
20 day(s) QLIKE                    : 1.25539802
full horizon MAE                   : 0.98846781
full horizon RMSE                  : 1.32541954
full horizon R2                    : -1.23850227
full horizon Pearson r             : -0.00482949
full horizon QLIKE                 : 1.25539802

--- Task 5 ---
1 day(s) MAE                       : 0.09541013
1 day(s) RMSE                      : 0.11306167
1 day(s) R2                        : -2.47224958
1 day(s) Pearson r                 : -0.14310877
1 day(s) QLIKE                     : 2.97845345
3 day(s) MAE                       : 0.09571892
3 day(s) RMSE                      : 0.11323314
3 day(s) R2                        : -2.50100981
3 day(s) Pearson r                 : -0.01401595
3 day(s) QLIKE                     : 2.99086019
5 day(s) MAE                       : 0.09604450
5 day(s) RMSE                      : 0.11342463
5 day(s) R2                        : -2.53029237
5 day(s) Pearson r                 : -0.00696806
5 day(s) QLIKE                     : 2.98924592
10 day(s) MAE                      : 0.09619621
10 day(s) RMSE                     : 0.11320709
10 day(s) R2                       : -2.55941723
10 day(s) Pearson r                : 0.00827639
10 day(s) QLIKE                    : 3.68924468
20 day(s) MAE                      : 0.09495995
20 day(s) RMSE                     : 0.11120804
20 day(s) R2                       : -2.51998766
20 day(s) Pearson r                : 0.03214793
20 day(s) QLIKE                    : 4.34758043
full horizon MAE                   : 0.09495995
full horizon RMSE                  : 0.11120804
full horizon R2                    : -2.51998766
full horizon Pearson r             : 0.03214793
full horizon QLIKE                 : 4.34758043

--- Task 6 ---
1 day(s) MAE                       : 2.63798043
1 day(s) RMSE                      : 3.05530628
1 day(s) R2                        : -0.17972566
1 day(s) Pearson r                 : 0.11015519
1 day(s) QLIKE                     : 0.07085534
3 day(s) MAE                       : 3.18019697
3 day(s) RMSE                      : 3.57812916
3 day(s) R2                        : -0.61287740
3 day(s) Pearson r                 : -0.00189960
3 day(s) QLIKE                     : 0.07457867
5 day(s) MAE                       : 3.99490845
5 day(s) RMSE                      : 4.54833107
5 day(s) R2                        : -1.59778774
5 day(s) Pearson r                 : -0.00180272
5 day(s) QLIKE                     : 0.08349651
10 day(s) MAE                      : 6.64178661
10 day(s) RMSE                     : 7.62179630
10 day(s) R2                       : -6.24452935
10 day(s) Pearson r                : -0.00391547
10 day(s) QLIKE                    : 0.11007501
20 day(s) MAE                      : 9.78483721
20 day(s) RMSE                     : 10.81017058
20 day(s) R2                       : -13.36447969
20 day(s) Pearson r                : -0.00828981
20 day(s) QLIKE                    : 0.12350550
full horizon MAE                   : 9.78483721
full horizon RMSE                  : 10.81017058
full horizon R2                    : -13.36447969
full horizon Pearson r             : -0.00828981
full horizon QLIKE                 : 0.12350550

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/C/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.525001, max=134.647
Saved y_pred min=3.17274, max=3.62561

=== BTCUSDT | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.551349524903824
  Min value:  -20.89631563497493
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3448190912377083
  Min value:  -4.2413266524913515
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5802586573962314
  Min value:  -109.72965053174772
Epoch 001 | phase=1 | train_loss=4.9619 | val_main=1.949324
Epoch 002 | phase=1 | train_loss=4.9367 | val_main=1.949362
Epoch 003 | phase=1 | train_loss=4.8141 | val_main=1.949575
Epoch 004 | phase=1 | train_loss=4.6283 | val_main=1.950402
Epoch 005 | phase=1 | train_loss=4.4513 | val_main=1.951415
Epoch 006 | phase=1 | train_loss=4.5535 | val_main=1.951785
Epoch 007 | phase=1 | train_loss=4.3106 | val_main=1.951692
Epoch 008 | phase=1 | train_loss=4.2149 | val_main=1.951611
Epoch 009 | phase=1 | train_loss=4.1726 | val_main=1.951826
Epoch 010 | phase=1 | train_loss=4.0538 | val_main=1.952064
Epoch 011 | phase=1 | train_loss=4.0276 | val_main=1.951197
Epoch 012 | phase=1 | train_loss=3.8476 | val_main=1.951272
Epoch 013 | phase=1 | train_loss=3.6162 | val_main=1.951518
Epoch 014 | phase=1 | train_loss=3.4065 | val_main=1.953311
Epoch 015 | phase=1 | train_loss=2.8490 | val_main=1.954090
Epoch 016 | phase=0 | train_loss=3.4172 | val_main=1.813849
Epoch 017 | phase=0 | train_loss=2.8808 | val_main=1.206948
Epoch 018 | phase=0 | train_loss=2.3493 | val_main=0.930464
Epoch 019 | phase=0 | train_loss=2.0559 | val_main=0.885287
Epoch 020 | phase=0 | train_loss=1.8430 | val_main=0.873508
Epoch 021 | phase=0 | train_loss=1.7791 | val_main=0.850038
Epoch 022 | phase=0 | train_loss=1.6151 | val_main=0.819467
Epoch 023 | phase=0 | train_loss=1.5631 | val_main=0.867410
Epoch 024 | phase=0 | train_loss=1.4488 | val_main=0.815839
Epoch 025 | phase=0 | train_loss=1.3600 | val_main=0.776645
Epoch 026 | phase=0 | train_loss=1.2990 | val_main=0.778339
Epoch 027 | phase=0 | train_loss=1.2109 | val_main=0.802437
Epoch 028 | phase=0 | train_loss=1.1480 | val_main=0.896909
Epoch 029 | phase=0 | train_loss=1.0978 | val_main=0.800879
Epoch 030 | phase=0 | train_loss=1.0263 | val_main=0.837132
Epoch 031 | phase=2 | train_loss=0.2780 | val_main=0.857954
Epoch 032 | phase=2 | train_loss=0.2690 | val_main=0.783860
Epoch 033 | phase=2 | train_loss=0.2565 | val_main=0.822531
Epoch 034 | phase=2 | train_loss=0.2385 | val_main=0.876996
Epoch 035 | phase=2 | train_loss=0.2292 | val_main=0.803932
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 3.99689241
1 day(s) RMSE                      : 8.87809756
1 day(s) R2                        : 0.06590336
1 day(s) Pearson r                 : 0.37813751
1 day(s) QLIKE                     : 0.25580831
full horizon MAE                   : 3.99689241
full horizon RMSE                  : 8.87809756
full horizon R2                    : 0.06590336
full horizon Pearson r             : 0.37813751
full horizon QLIKE                 : 0.25580831

--- Task 2 ---
1 day(s) MAE                       : 0.12522921
1 day(s) RMSE                      : 0.23789846
1 day(s) R2                        : -0.06450087
1 day(s) Pearson r                 : -0.11008888
1 day(s) QLIKE                     : 20.94522227
full horizon MAE                   : 0.12522921
full horizon RMSE                  : 0.23789846
full horizon R2                    : -0.06450087
full horizon Pearson r             : -0.11008888
full horizon QLIKE                 : 20.94522227

--- Task 3 ---
1 day(s) MAE                       : 0.20875851
1 day(s) RMSE                      : 0.30113533
1 day(s) R2                        : -0.64278885
1 day(s) Pearson r                 : -0.06593835
1 day(s) QLIKE                     : 0.17548291
full horizon MAE                   : 0.20875851
full horizon RMSE                  : 0.30113533
full horizon R2                    : -0.64278885
full horizon Pearson r             : -0.06593835
full horizon QLIKE                 : 0.17548291

--- Task 4 ---
1 day(s) MAE                       : 1.01029246
1 day(s) RMSE                      : 2.29848735
1 day(s) R2                        : -0.06045923
1 day(s) Pearson r                 : -0.05249756
1 day(s) QLIKE                     : 5.34298519
full horizon MAE                   : 1.01029246
full horizon RMSE                  : 2.29848735
full horizon R2                    : -0.06045923
full horizon Pearson r             : -0.05249756
full horizon QLIKE                 : 5.34298519

--- Task 5 ---
1 day(s) MAE                       : 0.10102501
1 day(s) RMSE                      : 0.11512802
1 day(s) R2                        : -3.30053214
1 day(s) Pearson r                 : 0.09771618
1 day(s) QLIKE                     : 4.13264647
full horizon MAE                   : 0.10102501
full horizon RMSE                  : 0.11512802
full horizon R2                    : -3.30053214
full horizon Pearson r             : 0.09771618
full horizon QLIKE                 : 4.13264647

--- Task 6 ---
1 day(s) MAE                       : 1.07600708
1 day(s) RMSE                      : 1.31503076
1 day(s) R2                        : -0.96975623
1 day(s) Pearson r                 : -0.16049800
1 day(s) QLIKE                     : 0.07400128
full horizon MAE                   : 1.07600708
full horizon RMSE                  : 1.31503076
full horizon R2                    : -0.96975623
full horizon Pearson r             : -0.16049800
full horizon QLIKE                 : 0.07400128

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_H1.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=0.973505, max=51.0523

=== BTCUSDT | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2399
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.551349524903824
  Min value:  -20.89631563497493
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3448190912377083
  Min value:  -4.2413266524913515
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5802586573962314
  Min value:  -109.72965053174772
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 326s 463ms/step - loss: 1.0056 - val_loss: 1.6621 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 9s 174ms/step - loss: 0.9704 - val_loss: 1.7061 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 190ms/step - loss: 0.9420 - val_loss: 1.6756 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 188ms/step - loss: 0.9190 - val_loss: 1.0481 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 189ms/step - loss: 0.9057 - val_loss: 1.2995 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 187ms/step - loss: 0.8963 - val_loss: 0.9975 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 188ms/step - loss: 0.8975 - val_loss: 0.9580 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 186ms/step - loss: 0.8958 - val_loss: 1.0102 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 189ms/step - loss: 0.8915 - val_loss: 0.9875 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 185ms/step - loss: 0.8846 - val_loss: 0.9575 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 185ms/step - loss: 0.8748 - val_loss: 1.1311 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 186ms/step - loss: 0.8814 - val_loss: 1.1296 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 186ms/step - loss: 0.8856 - val_loss: 1.0030 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 187ms/step - loss: 0.8848 - val_loss: 0.9057 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 188ms/step - loss: 0.8774 - val_loss: 1.0175 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=16 tf_ratio=0.211 -> TF=ON
Epoch 16/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 186ms/step - loss: 0.8679 - val_loss: 1.0047 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=17 tf_ratio=0.158 -> TF=ON
Epoch 17/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 189ms/step - loss: 0.8713 - val_loss: 0.9151 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=18 tf_ratio=0.105 -> TF=ON
Epoch 18/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 12s 217ms/step - loss: 0.8680 - val_loss: 1.2609 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=19 tf_ratio=0.053 -> TF=ON
Epoch 19/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 0s 188ms/step - loss: 0.8670
Epoch 19: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
54/54 ━━━━━━━━━━━━━━━━━━━━ 10s 194ms/step - loss: 0.8939 - val_loss: 1.0361 - learning_rate: 5.0000e-04
Epoch 19: early stopping
Restoring model weights from the end of the best epoch: 14.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.45645212
1 day(s) RMSE                      : 9.13137586
1 day(s) R2                        : 0.01184648
1 day(s) Pearson r                 : 0.31176761
1 day(s) QLIKE                     : 0.32361205
full horizon MAE                   : 4.45645212
full horizon RMSE                  : 9.13137586
full horizon R2                    : 0.01184648
full horizon Pearson r             : 0.31176761
full horizon QLIKE                 : 0.32361205

--- Task 2 ---
1 day(s) MAE                       : 0.08936264
1 day(s) RMSE                      : 0.24505932
1 day(s) R2                        : -0.12954935
1 day(s) Pearson r                 : -0.03794293
1 day(s) QLIKE                     : 3.57849947
full horizon MAE                   : 0.08936264
full horizon RMSE                  : 0.24505932
full horizon R2                    : -0.12954935
full horizon Pearson r             : -0.03794293
full horizon QLIKE                 : 3.57849947

--- Task 3 ---
1 day(s) MAE                       : 0.18000615
1 day(s) RMSE                      : 0.24963807
1 day(s) R2                        : -0.12896363
1 day(s) Pearson r                 : -0.02917219
1 day(s) QLIKE                     : 0.14270441
full horizon MAE                   : 0.18000615
full horizon RMSE                  : 0.24963807
full horizon R2                    : -0.12896363
full horizon Pearson r             : -0.02917219
full horizon QLIKE                 : 0.14270441

--- Task 4 ---
1 day(s) MAE                       : 0.95190070
1 day(s) RMSE                      : 2.40956885
1 day(s) R2                        : -0.16543599
1 day(s) Pearson r                 : 0.11071001
1 day(s) QLIKE                     : 1.19749746
full horizon MAE                   : 0.95190070
full horizon RMSE                  : 2.40956885
full horizon R2                    : -0.16543599
full horizon Pearson r             : 0.11071001
full horizon QLIKE                 : 1.19749746

--- Task 5 ---
1 day(s) MAE                       : 0.10188222
1 day(s) RMSE                      : 0.11602593
1 day(s) R2                        : -3.36787590
1 day(s) Pearson r                 : -0.02185372
1 day(s) QLIKE                     : 1.62015561
full horizon MAE                   : 0.10188222
full horizon RMSE                  : 0.11602593
full horizon R2                    : -3.36787590
full horizon Pearson r             : -0.02185372
full horizon QLIKE                 : 1.62015561

--- Task 6 ---
1 day(s) MAE                       : 0.84278382
1 day(s) RMSE                      : 1.00083520
1 day(s) R2                        : -0.14094787
1 day(s) Pearson r                 : 0.14875183
1 day(s) QLIKE                     : 0.06336785
full horizon MAE                   : 0.84278382
full horizon RMSE                  : 1.00083520
full horizon R2                    : -0.14094787
full horizon Pearson r             : 0.14875183
full horizon QLIKE                 : 0.06336785

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.36666, max=36.5761

=== BTCUSDT | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.5541968122841485
  Min value:  -20.88291388736243
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.345979297163924
  Min value:  -4.245202118935104
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5815988770306517
  Min value:  -109.67386610990627
Epoch 001 | phase=1 | train_loss=5.1067 | val_main=1.966552
Epoch 002 | phase=1 | train_loss=4.9556 | val_main=1.966579
Epoch 003 | phase=1 | train_loss=4.8628 | val_main=1.966770
Epoch 004 | phase=1 | train_loss=4.6538 | val_main=1.966857
Epoch 005 | phase=1 | train_loss=4.4947 | val_main=1.967437
Epoch 006 | phase=1 | train_loss=4.4878 | val_main=1.967708
Epoch 007 | phase=1 | train_loss=4.3781 | val_main=1.967751
Epoch 008 | phase=1 | train_loss=4.2601 | val_main=1.967540
Epoch 009 | phase=1 | train_loss=4.2389 | val_main=1.967807
Epoch 010 | phase=1 | train_loss=4.1308 | val_main=1.967275
Epoch 011 | phase=1 | train_loss=4.1114 | val_main=1.967112
Epoch 012 | phase=1 | train_loss=3.9747 | val_main=1.967030
Epoch 013 | phase=1 | train_loss=3.8005 | val_main=1.966801
Epoch 014 | phase=1 | train_loss=3.4798 | val_main=1.966577
Epoch 015 | phase=1 | train_loss=3.0532 | val_main=1.965674
Epoch 016 | phase=0 | train_loss=3.6851 | val_main=1.882010
Epoch 017 | phase=0 | train_loss=3.4961 | val_main=1.544262
Epoch 018 | phase=0 | train_loss=3.0363 | val_main=1.046149
Epoch 019 | phase=0 | train_loss=2.8899 | val_main=1.011407
Epoch 020 | phase=0 | train_loss=2.6411 | val_main=0.993511
Epoch 021 | phase=0 | train_loss=2.5268 | val_main=1.036548
Epoch 022 | phase=0 | train_loss=2.4292 | val_main=0.996138
Epoch 023 | phase=0 | train_loss=2.4264 | val_main=0.994868
Epoch 024 | phase=0 | train_loss=2.3487 | val_main=1.074887
Epoch 025 | phase=0 | train_loss=2.1611 | val_main=1.035774
Epoch 026 | phase=0 | train_loss=2.0269 | val_main=1.132779
Epoch 027 | phase=0 | train_loss=1.9207 | val_main=1.095498
Epoch 028 | phase=0 | train_loss=1.8699 | val_main=1.147024
Epoch 029 | phase=0 | train_loss=1.8330 | val_main=1.157140
Epoch 030 | phase=0 | train_loss=1.7701 | val_main=1.278695
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.32155870
1 day(s) RMSE                      : 9.09525472
1 day(s) R2                        : 0.01964873
1 day(s) Pearson r                 : 0.26370386
1 day(s) QLIKE                     : 0.33020517
3 day(s) MAE                       : 4.42780266
3 day(s) RMSE                      : 9.21733197
3 day(s) R2                        : -0.00670508
3 day(s) Pearson r                 : 0.20914083
3 day(s) QLIKE                     : 0.35241421
5 day(s) MAE                       : 4.50829430
5 day(s) RMSE                      : 9.30881624
5 day(s) R2                        : -0.02710981
5 day(s) Pearson r                 : 0.16253769
5 day(s) QLIKE                     : 0.36762041
full horizon MAE                   : 4.50829430
full horizon RMSE                  : 9.30881624
full horizon R2                    : -0.02710981
full horizon Pearson r             : 0.16253769
full horizon QLIKE                 : 0.36762041

--- Task 2 ---
1 day(s) MAE                       : 0.12419797
1 day(s) RMSE                      : 0.23674999
1 day(s) R2                        : -0.05424776
1 day(s) Pearson r                 : -0.13281888
1 day(s) QLIKE                     : 20.74798964
3 day(s) MAE                       : 0.12083435
3 day(s) RMSE                      : 0.23652339
3 day(s) R2                        : -0.05228187
3 day(s) Pearson r                 : -0.12856517
3 day(s) QLIKE                     : 20.45252864
5 day(s) MAE                       : 0.11944532
5 day(s) RMSE                      : 0.23620560
5 day(s) R2                        : -0.04949837
5 day(s) Pearson r                 : -0.11651827
5 day(s) QLIKE                     : 20.01441203
full horizon MAE                   : 0.11944532
full horizon RMSE                  : 0.23620560
full horizon R2                    : -0.04949837
full horizon Pearson r             : -0.11651827
full horizon QLIKE                 : 20.01441203

--- Task 3 ---
1 day(s) MAE                       : 0.15538194
1 day(s) RMSE                      : 0.25847641
1 day(s) R2                        : -0.21031981
1 day(s) Pearson r                 : -0.04380784
1 day(s) QLIKE                     : 0.15039563
3 day(s) MAE                       : 0.15936760
3 day(s) RMSE                      : 0.25865647
3 day(s) R2                        : -0.21254007
3 day(s) Pearson r                 : -0.05253732
3 day(s) QLIKE                     : 0.15041565
5 day(s) MAE                       : 0.16059436
5 day(s) RMSE                      : 0.25927227
5 day(s) R2                        : -0.21883872
5 day(s) Pearson r                 : -0.05206314
5 day(s) QLIKE                     : 0.15071995
full horizon MAE                   : 0.16059436
full horizon RMSE                  : 0.25927227
full horizon R2                    : -0.21883872
full horizon Pearson r             : -0.05206314
full horizon QLIKE                 : 0.15071995

--- Task 4 ---
1 day(s) MAE                       : 1.02965059
1 day(s) RMSE                      : 2.31898338
1 day(s) R2                        : -0.07945618
1 day(s) Pearson r                 : -0.04504905
1 day(s) QLIKE                     : 8.29950327
3 day(s) MAE                       : 1.03406936
3 day(s) RMSE                      : 2.31616901
3 day(s) R2                        : -0.07725551
3 day(s) Pearson r                 : -0.04398572
3 day(s) QLIKE                     : 8.31899924
5 day(s) MAE                       : 1.03812575
5 day(s) RMSE                      : 2.31174651
5 day(s) R2                        : -0.07356225
5 day(s) Pearson r                 : -0.04064483
5 day(s) QLIKE                     : 8.46092194
full horizon MAE                   : 1.03812575
full horizon RMSE                  : 2.31174651
full horizon R2                    : -0.07356225
full horizon Pearson r             : -0.04064483
full horizon QLIKE                 : 8.46092194

--- Task 5 ---
1 day(s) MAE                       : 0.10184603
1 day(s) RMSE                      : 0.11598547
1 day(s) R2                        : -3.36482943
1 day(s) Pearson r                 : 0.11927018
1 day(s) QLIKE                     : 3.03424335
3 day(s) MAE                       : 0.10188542
3 day(s) RMSE                      : 0.11601122
3 day(s) R2                        : -3.36987900
3 day(s) Pearson r                 : 0.11811853
3 day(s) QLIKE                     : 3.07196006
5 day(s) MAE                       : 0.10192661
5 day(s) RMSE                      : 0.11603690
5 day(s) R2                        : -3.37547946
5 day(s) Pearson r                 : 0.11583503
5 day(s) QLIKE                     : 3.06970227
full horizon MAE                   : 0.10192661
full horizon RMSE                  : 0.11603690
full horizon R2                    : -3.37547946
full horizon Pearson r             : 0.11583503
full horizon QLIKE                 : 3.06970227

--- Task 6 ---
1 day(s) MAE                       : 1.07096173
1 day(s) RMSE                      : 1.30395656
1 day(s) R2                        : -0.93672030
1 day(s) Pearson r                 : -0.17981276
1 day(s) QLIKE                     : 0.07433467
3 day(s) MAE                       : 1.07322252
3 day(s) RMSE                      : 1.30400081
3 day(s) R2                        : -0.93827117
3 day(s) Pearson r                 : -0.18151810
3 day(s) QLIKE                     : 0.07423962
5 day(s) MAE                       : 1.07938556
5 day(s) RMSE                      : 1.30831716
5 day(s) R2                        : -0.95209647
5 day(s) Pearson r                 : -0.18358153
5 day(s) QLIKE                     : 0.07432418
full horizon MAE                   : 1.07938556
full horizon RMSE                  : 1.30831716
full horizon R2                    : -0.95209647
full horizon Pearson r             : -0.18358153
full horizon QLIKE                 : 0.07432418

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_H5.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.52162, max=19.0213

=== BTCUSDT | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2399
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.5541968122841485
  Min value:  -20.88291388736243
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.345979297163924
  Min value:  -4.245202118935104
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5815988770306517
  Min value:  -109.67386610990627
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 165s 406ms/step - loss: 0.8001 - val_loss: 2.1970 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 12s 224ms/step - loss: 0.5008 - val_loss: 1.2214 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 246ms/step - loss: 0.4477 - val_loss: 1.0595 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 249ms/step - loss: 0.4300 - val_loss: 1.8890 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 248ms/step - loss: 0.4173 - val_loss: 1.1321 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 241ms/step - loss: 0.4071 - val_loss: 1.1852 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 248ms/step - loss: 0.3972 - val_loss: 1.0617 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 241ms/step - loss: 0.3927 - val_loss: 1.0052 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 242ms/step - loss: 0.3940 - val_loss: 1.1075 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 243ms/step - loss: 0.3824 - val_loss: 1.0477 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 241ms/step - loss: 0.3799 - val_loss: 1.0129 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 243ms/step - loss: 0.3781 - val_loss: 1.0360 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 0s 235ms/step - loss: 0.3464
Epoch 13: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
54/54 ━━━━━━━━━━━━━━━━━━━━ 13s 242ms/step - loss: 0.3687 - val_loss: 1.0622 - learning_rate: 5.0000e-04
Epoch 13: early stopping
Restoring model weights from the end of the best epoch: 8.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.37998011
1 day(s) RMSE                      : 9.32960953
1 day(s) R2                        : -0.03152300
1 day(s) Pearson r                 : 0.27056371
1 day(s) QLIKE                     : 0.33685103
3 day(s) MAE                       : 4.66039185
3 day(s) RMSE                      : 9.57569521
3 day(s) R2                        : -0.08650676
3 day(s) Pearson r                 : 0.22324885
3 day(s) QLIKE                     : 0.36156530
5 day(s) MAE                       : 4.78826706
5 day(s) RMSE                      : 9.74504535
5 day(s) R2                        : -0.12563009
5 day(s) Pearson r                 : 0.17607040
5 day(s) QLIKE                     : 0.37064856
full horizon MAE                   : 4.78826706
full horizon RMSE                  : 9.74504535
full horizon R2                    : -0.12563009
full horizon Pearson r             : 0.17607040
full horizon QLIKE                 : 0.37064856

--- Task 2 ---
1 day(s) MAE                       : 0.08864709
1 day(s) RMSE                      : 0.24469260
1 day(s) R2                        : -0.12617123
1 day(s) Pearson r                 : 0.00452437
1 day(s) QLIKE                     : 3.48209836
3 day(s) MAE                       : 0.09094408
3 day(s) RMSE                      : 0.24565945
3 day(s) R2                        : -0.13514376
3 day(s) Pearson r                 : 0.03718493
3 day(s) QLIKE                     : 18.27182264
5 day(s) MAE                       : 0.09212537
5 day(s) RMSE                      : 0.24451071
5 day(s) R2                        : -0.12459760
5 day(s) Pearson r                 : 0.04623455
5 day(s) QLIKE                     : 18.37324118
full horizon MAE                   : 0.09212537
full horizon RMSE                  : 0.24451071
full horizon R2                    : -0.12459760
full horizon Pearson r             : 0.04623455
full horizon QLIKE                 : 18.37324118

--- Task 3 ---
1 day(s) MAE                       : 0.15702659
1 day(s) RMSE                      : 0.23707718
1 day(s) R2                        : -0.01821114
1 day(s) Pearson r                 : 0.00897044
1 day(s) QLIKE                     : 0.13956536
3 day(s) MAE                       : 0.12926117
3 day(s) RMSE                      : 0.24399567
3 day(s) R2                        : -0.07898061
3 day(s) Pearson r                 : -0.01439370
3 day(s) QLIKE                     : 0.14145143
5 day(s) MAE                       : 0.12612569
5 day(s) RMSE                      : 0.25020883
5 day(s) R2                        : -0.13511377
5 day(s) Pearson r                 : -0.01982481
5 day(s) QLIKE                     : 0.14158814
full horizon MAE                   : 0.12612569
full horizon RMSE                  : 0.25020883
full horizon R2                    : -0.13511377
full horizon Pearson r             : -0.01982481
full horizon QLIKE                 : 0.14158814

--- Task 4 ---
1 day(s) MAE                       : 0.94404606
1 day(s) RMSE                      : 2.39694921
1 day(s) R2                        : -0.15326047
1 day(s) Pearson r                 : 0.19739123
1 day(s) QLIKE                     : 1.15003828
3 day(s) MAE                       : 0.94345598
3 day(s) RMSE                      : 2.39994009
3 day(s) R2                        : -0.15658893
3 day(s) Pearson r                 : 0.13367198
3 day(s) QLIKE                     : 1.17792430
5 day(s) MAE                       : 0.95288359
5 day(s) RMSE                      : 2.40553834
5 day(s) R2                        : -0.16244223
5 day(s) Pearson r                 : 0.08956047
5 day(s) QLIKE                     : 1.25135580
full horizon MAE                   : 0.95288359
full horizon RMSE                  : 2.40553834
full horizon R2                    : -0.16244223
full horizon Pearson r             : 0.08956047
full horizon QLIKE                 : 1.25135580

--- Task 5 ---
1 day(s) MAE                       : 0.10188225
1 day(s) RMSE                      : 0.11602528
1 day(s) R2                        : -3.36782680
1 day(s) Pearson r                 : 0.04964819
1 day(s) QLIKE                     : 1.05849258
3 day(s) MAE                       : 0.10192428
3 day(s) RMSE                      : 0.11605318
3 day(s) R2                        : -3.37304073
3 day(s) Pearson r                 : 0.03503306
3 day(s) QLIKE                     : 0.77728059
5 day(s) MAE                       : 0.10196556
5 day(s) RMSE                      : 0.11607818
5 day(s) R2                        : -3.37859320
5 day(s) Pearson r                 : 0.03743300
5 day(s) QLIKE                     : 0.67486963
full horizon MAE                   : 0.10196556
full horizon RMSE                  : 0.11607818
full horizon R2                    : -3.37859320
full horizon Pearson r             : 0.03743300
full horizon QLIKE                 : 0.67486963

--- Task 6 ---
1 day(s) MAE                       : 0.85245926
1 day(s) RMSE                      : 1.03063945
1 day(s) R2                        : -0.20991311
1 day(s) Pearson r                 : 0.11192787
1 day(s) QLIKE                     : 0.06336096
3 day(s) MAE                       : 0.87731532
3 day(s) RMSE                      : 1.07083279
3 day(s) R2                        : -0.30707990
3 day(s) Pearson r                 : 0.11311351
3 day(s) QLIKE                     : 0.06362444
5 day(s) MAE                       : 0.89304913
5 day(s) RMSE                      : 1.09715089
5 day(s) R2                        : -0.37280205
5 day(s) Pearson r                 : 0.10617994
5 day(s) QLIKE                     : 0.06386617
full horizon MAE                   : 0.89304913
full horizon RMSE                  : 1.09715089
full horizon R2                    : -0.37280205
full horizon Pearson r             : 0.10617994
full horizon QLIKE                 : 0.06386617

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.26456, max=26.0744

=== BTCUSDT | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.556915178048574
  Min value:  -20.868486718259952
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3469968336435327
  Min value:  -4.249261289934393
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5827978941292526
  Min value:  -109.59273460949947
Epoch 001 | phase=1 | train_loss=5.0298 | val_main=1.949810
Epoch 002 | phase=1 | train_loss=4.9852 | val_main=1.949735
Epoch 003 | phase=1 | train_loss=4.8836 | val_main=1.948614
Epoch 004 | phase=1 | train_loss=4.6657 | val_main=1.948013
Epoch 005 | phase=1 | train_loss=4.5066 | val_main=1.945951
Epoch 006 | phase=1 | train_loss=4.4786 | val_main=1.946096
Epoch 007 | phase=1 | train_loss=4.4168 | val_main=1.945846
Epoch 008 | phase=1 | train_loss=4.3088 | val_main=1.946028
Epoch 009 | phase=1 | train_loss=4.2377 | val_main=1.945848
Epoch 010 | phase=1 | train_loss=4.1184 | val_main=1.945946
Epoch 011 | phase=1 | train_loss=4.1097 | val_main=1.946785
Epoch 012 | phase=1 | train_loss=4.0173 | val_main=1.947196
Epoch 013 | phase=1 | train_loss=3.8850 | val_main=1.947487
Epoch 014 | phase=1 | train_loss=3.5633 | val_main=1.946051
Epoch 015 | phase=1 | train_loss=3.2237 | val_main=1.945862
Epoch 016 | phase=0 | train_loss=3.8517 | val_main=1.864077
Epoch 017 | phase=0 | train_loss=3.6000 | val_main=1.502214
Epoch 018 | phase=0 | train_loss=3.2291 | val_main=1.035893
Epoch 019 | phase=0 | train_loss=3.0875 | val_main=0.991500
Epoch 020 | phase=0 | train_loss=2.8155 | val_main=1.024988
Epoch 021 | phase=0 | train_loss=2.7151 | val_main=1.114521
Epoch 022 | phase=0 | train_loss=2.5908 | val_main=1.026490
Epoch 023 | phase=0 | train_loss=2.6136 | val_main=1.085709
Epoch 024 | phase=0 | train_loss=2.4820 | val_main=1.242400
Epoch 025 | phase=0 | train_loss=2.2974 | val_main=1.247347
Epoch 026 | phase=0 | train_loss=2.2296 | val_main=1.250378
Epoch 027 | phase=0 | train_loss=2.0995 | val_main=1.261745
Epoch 028 | phase=0 | train_loss=2.0278 | val_main=1.248143
Epoch 029 | phase=0 | train_loss=1.9787 | val_main=1.351460
Epoch 030 | phase=0 | train_loss=1.9113 | val_main=1.455147
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.46841920
1 day(s) RMSE                      : 9.34664726
1 day(s) R2                        : -0.03529398
1 day(s) Pearson r                 : 0.18461273
1 day(s) QLIKE                     : 0.36049401
3 day(s) MAE                       : 4.54869935
3 day(s) RMSE                      : 9.41913408
3 day(s) R2                        : -0.05126877
3 day(s) Pearson r                 : 0.13698581
3 day(s) QLIKE                     : 0.37792058
5 day(s) MAE                       : 4.60240802
5 day(s) RMSE                      : 9.48156697
5 day(s) R2                        : -0.06558524
5 day(s) Pearson r                 : 0.09720925
5 day(s) QLIKE                     : 0.39088521
10 day(s) MAE                      : 4.67468781
10 day(s) RMSE                     : 9.52560103
10 day(s) R2                       : -0.07524695
10 day(s) Pearson r                : 0.04463403
10 day(s) QLIKE                    : 0.40438674
full horizon MAE                   : 4.67468781
full horizon RMSE                  : 9.52560103
full horizon R2                    : -0.07524695
full horizon Pearson r             : 0.04463403
full horizon QLIKE                 : 0.40438674

--- Task 2 ---
1 day(s) MAE                       : 0.12099361
1 day(s) RMSE                      : 0.23714484
1 day(s) R2                        : -0.05776722
1 day(s) Pearson r                 : -0.12231277
1 day(s) QLIKE                     : 15.31280369
3 day(s) MAE                       : 0.12913004
3 day(s) RMSE                      : 0.23729434
3 day(s) R2                        : -0.05915293
3 day(s) Pearson r                 : -0.11832286
3 day(s) QLIKE                     : 15.12863930
5 day(s) MAE                       : 0.13129169
5 day(s) RMSE                      : 0.23745231
5 day(s) R2                        : -0.06060624
5 day(s) Pearson r                 : -0.11320677
5 day(s) QLIKE                     : 14.77843094
10 day(s) MAE                      : 0.13362233
10 day(s) RMSE                     : 0.23655085
10 day(s) R2                       : -0.05266620
10 day(s) Pearson r                : -0.08164940
10 day(s) QLIKE                    : 14.21489509
full horizon MAE                   : 0.13362233
full horizon RMSE                  : 0.23655085
full horizon R2                    : -0.05266620
full horizon Pearson r             : -0.08164940
full horizon QLIKE                 : 14.21489509

--- Task 3 ---
1 day(s) MAE                       : 0.23111976
1 day(s) RMSE                      : 0.31178204
1 day(s) R2                        : -0.76100466
1 day(s) Pearson r                 : -0.04063034
1 day(s) QLIKE                     : 0.17717095
3 day(s) MAE                       : 0.22976718
3 day(s) RMSE                      : 0.31113805
3 day(s) R2                        : -0.75450923
3 day(s) Pearson r                 : -0.03908292
3 day(s) QLIKE                     : 0.17729272
5 day(s) MAE                       : 0.22987197
5 day(s) RMSE                      : 0.31068148
5 day(s) R2                        : -0.75010802
5 day(s) Pearson r                 : -0.03665206
5 day(s) QLIKE                     : 0.17683439
10 day(s) MAE                      : 0.23084591
10 day(s) RMSE                     : 0.30795885
10 day(s) R2                       : -0.72137841
10 day(s) Pearson r                : -0.01774903
10 day(s) QLIKE                    : 0.17378386
full horizon MAE                   : 0.23084591
full horizon RMSE                  : 0.30795885
full horizon R2                    : -0.72137841
full horizon Pearson r             : -0.01774903
full horizon QLIKE                 : 0.17378386

--- Task 4 ---
1 day(s) MAE                       : 1.05547766
1 day(s) RMSE                      : 2.34647057
1 day(s) R2                        : -0.10519769
1 day(s) Pearson r                 : -0.12694788
1 day(s) QLIKE                     : 1.54223853
3 day(s) MAE                       : 1.05996495
3 day(s) RMSE                      : 2.34656190
3 day(s) R2                        : -0.10571260
3 day(s) Pearson r                 : -0.13652594
3 day(s) QLIKE                     : 1.51177112
5 day(s) MAE                       : 1.06284485
5 day(s) RMSE                      : 2.34636018
5 day(s) R2                        : -0.10595173
5 day(s) Pearson r                 : -0.14559983
5 day(s) QLIKE                     : 1.53101951
10 day(s) MAE                      : 1.05825101
10 day(s) RMSE                     : 2.33491791
10 day(s) R2                       : -0.09625197
10 day(s) Pearson r                : -0.14396451
10 day(s) QLIKE                    : 1.53927370
full horizon MAE                   : 1.05825101
full horizon RMSE                  : 2.33491791
full horizon R2                    : -0.09625197
full horizon Pearson r             : -0.14396451
full horizon QLIKE                 : 1.53927370

--- Task 5 ---
1 day(s) MAE                       : 0.10183479
1 day(s) RMSE                      : 0.11596860
1 day(s) R2                        : -3.36356026
1 day(s) Pearson r                 : 0.15714126
1 day(s) QLIKE                     : 3.29735790
3 day(s) MAE                       : 0.10187482
3 day(s) RMSE                      : 0.11599602
3 day(s) R2                        : -3.36873422
3 day(s) Pearson r                 : 0.14764430
3 day(s) QLIKE                     : 3.30241644
5 day(s) MAE                       : 0.10191270
5 day(s) RMSE                      : 0.11601782
5 day(s) R2                        : -3.37404069
5 day(s) Pearson r                 : 0.14501519
5 day(s) QLIKE                     : 3.34612649
10 day(s) MAE                      : 0.10202748
10 day(s) RMSE                     : 0.11608927
10 day(s) R2                       : -3.38967891
10 day(s) Pearson r                : 0.14523934
10 day(s) QLIKE                    : 3.31463005
full horizon MAE                   : 0.10202748
full horizon RMSE                  : 0.11608927
full horizon R2                    : -3.38967891
full horizon Pearson r             : 0.14523934
full horizon QLIKE                 : 3.31463005

--- Task 6 ---
1 day(s) MAE                       : 1.06773857
1 day(s) RMSE                      : 1.31292160
1 day(s) R2                        : -0.96344276
1 day(s) Pearson r                 : -0.16267462
1 day(s) QLIKE                     : 0.07387049
3 day(s) MAE                       : 1.06979713
3 day(s) RMSE                      : 1.31287555
3 day(s) R2                        : -0.96474383
3 day(s) Pearson r                 : -0.16701916
3 day(s) QLIKE                     : 0.07391207
5 day(s) MAE                       : 1.07207239
5 day(s) RMSE                      : 1.31200483
5 day(s) R2                        : -0.96311648
5 day(s) Pearson r                 : -0.16582457
5 day(s) QLIKE                     : 0.07382041
10 day(s) MAE                      : 1.08060834
10 day(s) RMSE                     : 1.31820021
10 day(s) R2                       : -0.98585885
10 day(s) Pearson r                : -0.17062564
10 day(s) QLIKE                    : 0.07381569
full horizon MAE                   : 1.08060834
full horizon RMSE                  : 1.31820021
full horizon R2                    : -0.98585885
full horizon Pearson r             : -0.17062564
full horizon QLIKE                 : 0.07381569

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_H10.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.67727, max=15.0112

=== BTCUSDT | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2399
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.556915178048574
  Min value:  -20.868486718259952
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3469968336435327
  Min value:  -4.249261289934393
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5827978941292526
  Min value:  -109.59273460949947
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 158s 536ms/step - loss: 0.7568 - val_loss: 2.3306 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 15s 274ms/step - loss: 0.4287 - val_loss: 2.1369 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 18s 336ms/step - loss: 0.3869 - val_loss: 2.5701 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 22s 413ms/step - loss: 0.3692 - val_loss: 1.5112 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 22s 403ms/step - loss: 0.3544 - val_loss: 1.5585 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 21s 390ms/step - loss: 0.3390 - val_loss: 2.2013 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 21s 391ms/step - loss: 0.3210 - val_loss: 1.3151 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 19s 354ms/step - loss: 0.3139 - val_loss: 1.3529 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 20s 368ms/step - loss: 0.3118 - val_loss: 1.0788 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 18s 341ms/step - loss: 0.2992 - val_loss: 1.0160 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 19s 349ms/step - loss: 0.3007 - val_loss: 2.6975 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 21s 395ms/step - loss: 0.2957 - val_loss: 1.0586 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 19s 343ms/step - loss: 0.2866 - val_loss: 1.1876 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 20s 371ms/step - loss: 0.2830 - val_loss: 1.1902 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 0s 356ms/step - loss: 0.2549
Epoch 15: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
54/54 ━━━━━━━━━━━━━━━━━━━━ 20s 365ms/step - loss: 0.2799 - val_loss: 1.1966 - learning_rate: 5.0000e-04
Epoch 15: early stopping
Restoring model weights from the end of the best epoch: 10.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.32967830
1 day(s) RMSE                      : 9.20767506
1 day(s) R2                        : -0.00473597
1 day(s) Pearson r                 : 0.27456301
1 day(s) QLIKE                     : 0.34320530
3 day(s) MAE                       : 5.03509087
3 day(s) RMSE                      : 9.76230297
3 day(s) R2                        : -0.12926630
3 day(s) Pearson r                 : 0.20317689
3 day(s) QLIKE                     : 0.41870079
5 day(s) MAE                       : 5.25859736
5 day(s) RMSE                      : 9.97053707
5 day(s) R2                        : -0.17832494
5 day(s) Pearson r                 : 0.16404759
5 day(s) QLIKE                     : 0.43018618
10 day(s) MAE                      : 5.30697933
10 day(s) RMSE                     : 10.18888775
10 day(s) R2                       : -0.23020362
10 day(s) Pearson r                : 0.11161061
10 day(s) QLIKE                    : 0.41922734
full horizon MAE                   : 5.30697933
full horizon RMSE                  : 10.18888775
full horizon R2                    : -0.23020362
full horizon Pearson r             : 0.11161061
full horizon QLIKE                 : 0.41922734

--- Task 2 ---
1 day(s) MAE                       : 0.09032272
1 day(s) RMSE                      : 0.24538916
1 day(s) R2                        : -0.13259205
1 day(s) Pearson r                 : 0.06749296
1 day(s) QLIKE                     : 3.38181918
3 day(s) MAE                       : 0.09510507
3 day(s) RMSE                      : 0.24837981
3 day(s) R2                        : -0.16042346
3 day(s) Pearson r                 : -0.00246471
3 day(s) QLIKE                     : 4.24752113
5 day(s) MAE                       : 0.09762256
5 day(s) RMSE                      : 0.24978319
5 day(s) R2                        : -0.17362079
5 day(s) Pearson r                 : -0.00148821
5 day(s) QLIKE                     : 16.17349950
10 day(s) MAE                      : 0.09954074
10 day(s) RMSE                     : 0.25083596
10 day(s) R2                       : -0.18364443
10 day(s) Pearson r                : -0.00099884
10 day(s) QLIKE                    : 9.98125563
full horizon MAE                   : 0.09954074
full horizon RMSE                  : 0.25083596
full horizon R2                    : -0.18364443
full horizon Pearson r             : -0.00099884
full horizon QLIKE                 : 9.98125563

--- Task 3 ---
1 day(s) MAE                       : 0.23265662
1 day(s) RMSE                      : 0.27098060
1 day(s) R2                        : -0.33025449
1 day(s) Pearson r                 : -0.17298793
1 day(s) QLIKE                     : 0.14173906
3 day(s) MAE                       : 0.37226407
3 day(s) RMSE                      : 0.42747413
3 day(s) R2                        : -2.31183803
3 day(s) Pearson r                 : -0.05459458
3 day(s) QLIKE                     : 0.25821640
5 day(s) MAE                       : 0.51646102
5 day(s) RMSE                      : 0.58919467
5 day(s) R2                        : -5.29436218
5 day(s) Pearson r                 : -0.03888504
5 day(s) QLIKE                     : 1.00949542
10 day(s) MAE                      : 0.60052811
10 day(s) RMSE                     : 0.65552728
10 day(s) R2                       : -6.79960329
10 day(s) Pearson r                : -0.03050430
10 day(s) QLIKE                    : 0.62445733
full horizon MAE                   : 0.60052811
full horizon RMSE                  : 0.65552728
full horizon R2                    : -6.79960329
full horizon Pearson r             : -0.03050430
full horizon QLIKE                 : 0.62445733

--- Task 4 ---
1 day(s) MAE                       : 0.94532825
1 day(s) RMSE                      : 2.40511870
1 day(s) R2                        : -0.16113515
1 day(s) Pearson r                 : 0.16392420
1 day(s) QLIKE                     : 1.18053512
3 day(s) MAE                       : 0.93542642
3 day(s) RMSE                      : 2.39195411
3 day(s) R2                        : -0.14890447
3 day(s) Pearson r                 : -0.02321096
3 day(s) QLIKE                     : 1.24017962
5 day(s) MAE                       : 0.92726861
5 day(s) RMSE                      : 2.37672442
5 day(s) R2                        : -0.13476117
5 day(s) Pearson r                 : -0.01299378
5 day(s) QLIKE                     : 1.26116145
10 day(s) MAE                      : 0.91821311
10 day(s) RMSE                     : 2.34959626
10 day(s) R2                       : -0.11007836
10 day(s) Pearson r                : -0.00692264
10 day(s) QLIKE                    : 1.28125426
full horizon MAE                   : 0.91821311
full horizon RMSE                  : 2.34959626
full horizon R2                    : -0.11007836
full horizon Pearson r             : -0.00692264
full horizon QLIKE                 : 1.28125426

--- Task 5 ---
1 day(s) MAE                       : 0.10187977
1 day(s) RMSE                      : 0.11602397
1 day(s) R2                        : -3.36772785
1 day(s) Pearson r                 : -0.03157379
1 day(s) QLIKE                     : 1.39840007
3 day(s) MAE                       : 0.10191437
3 day(s) RMSE                      : 0.11604526
3 day(s) R2                        : -3.37244381
3 day(s) Pearson r                 : -0.05218751
3 day(s) QLIKE                     : 1.72189558
5 day(s) MAE                       : 0.10194876
5 day(s) RMSE                      : 0.11606403
5 day(s) R2                        : -3.37752594
5 day(s) Pearson r                 : -0.01453023
5 day(s) QLIKE                     : 1.77009613
10 day(s) MAE                      : 0.10206671
10 day(s) RMSE                     : 0.11613667
10 day(s) R2                       : -3.39326406
10 day(s) Pearson r                : -0.01215192
10 day(s) QLIKE                    : 1.60399984
full horizon MAE                   : 0.10206671
full horizon RMSE                  : 0.11613667
full horizon R2                    : -3.39326406
full horizon Pearson r             : -0.01215192
full horizon QLIKE                 : 1.60399984

--- Task 6 ---
1 day(s) MAE                       : 0.84344995
1 day(s) RMSE                      : 1.00842292
1 day(s) R2                        : -0.15831338
1 day(s) Pearson r                 : 0.10767142
1 day(s) QLIKE                     : 0.06354944
3 day(s) MAE                       : 0.84955107
3 day(s) RMSE                      : 1.02046875
3 day(s) R2                        : -0.18702054
3 day(s) Pearson r                 : 0.10738355
3 day(s) QLIKE                     : 0.06351213
5 day(s) MAE                       : 0.84550462
5 day(s) RMSE                      : 1.00997754
5 day(s) R2                        : -0.16331849
5 day(s) Pearson r                 : 0.10726755
5 day(s) QLIKE                     : 0.06334066
10 day(s) MAE                      : 0.84510583
10 day(s) RMSE                     : 1.00809029
10 day(s) R2                       : -0.16140712
10 day(s) Pearson r                : 0.10826225
10 day(s) QLIKE                    : 0.06309717
full horizon MAE                   : 0.84510583
full horizon RMSE                  : 1.00809029
full horizon R2                    : -0.16140712
full horizon Pearson r             : 0.10826225
full horizon QLIKE                 : 0.06309717

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=1.84483, max=23.7528

=== BTCUSDT | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 2399
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.564043013270618
  Min value:  -20.83818392077683
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3500532890780494
  Min value:  -4.25835726179007
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5862887708961093
  Min value:  -109.4415927913587
Epoch 001 | phase=1 | train_loss=5.0493 | val_main=1.916485
Epoch 002 | phase=1 | train_loss=5.0089 | val_main=1.916466
Epoch 003 | phase=1 | train_loss=4.9715 | val_main=1.915412
Epoch 004 | phase=1 | train_loss=4.7112 | val_main=1.914359
Epoch 005 | phase=1 | train_loss=4.5654 | val_main=1.911446
Epoch 006 | phase=1 | train_loss=4.4531 | val_main=1.911848
Epoch 007 | phase=1 | train_loss=4.3952 | val_main=1.911482
Epoch 008 | phase=1 | train_loss=4.3343 | val_main=1.911443
Epoch 009 | phase=1 | train_loss=4.2924 | val_main=1.911326
Epoch 010 | phase=1 | train_loss=4.1556 | val_main=1.911229
Epoch 011 | phase=1 | train_loss=4.1045 | val_main=1.911913
Epoch 012 | phase=1 | train_loss=4.0427 | val_main=1.912493
Epoch 013 | phase=1 | train_loss=3.6715 | val_main=1.912251
Epoch 014 | phase=1 | train_loss=3.3791 | val_main=1.911154
Epoch 015 | phase=1 | train_loss=3.1020 | val_main=1.911810
Epoch 016 | phase=0 | train_loss=3.8672 | val_main=1.859176
Epoch 017 | phase=0 | train_loss=3.6755 | val_main=1.568526
Epoch 018 | phase=0 | train_loss=3.3774 | val_main=1.152454
Epoch 019 | phase=0 | train_loss=3.2453 | val_main=1.052262
Epoch 020 | phase=0 | train_loss=3.0597 | val_main=1.060718
Epoch 021 | phase=0 | train_loss=2.9602 | val_main=1.164556
Epoch 022 | phase=0 | train_loss=2.8300 | val_main=1.153736
Epoch 023 | phase=0 | train_loss=2.7821 | val_main=1.247784
Epoch 024 | phase=0 | train_loss=2.6419 | val_main=1.334602
Epoch 025 | phase=0 | train_loss=2.4879 | val_main=1.423818
Epoch 026 | phase=0 | train_loss=2.4123 | val_main=1.485629
Epoch 027 | phase=0 | train_loss=2.2875 | val_main=1.330217
Epoch 028 | phase=0 | train_loss=2.2101 | val_main=1.272612
Epoch 029 | phase=0 | train_loss=2.1877 | val_main=1.364105
Epoch 030 | phase=0 | train_loss=2.1223 | val_main=1.520788
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 4.55576110
1 day(s) RMSE                      : 9.41134137
1 day(s) R2                        : -0.04967544
1 day(s) Pearson r                 : 0.14832844
1 day(s) QLIKE                     : 0.37702536
3 day(s) MAE                       : 4.61521300
3 day(s) RMSE                      : 9.46109668
3 day(s) R2                        : -0.06065651
3 day(s) Pearson r                 : 0.10767853
3 day(s) QLIKE                     : 0.39119797
5 day(s) MAE                       : 4.64228867
5 day(s) RMSE                      : 9.51026208
5 day(s) R2                        : -0.07204480
5 day(s) Pearson r                 : 0.07659236
5 day(s) QLIKE                     : 0.40107959
10 day(s) MAE                      : 4.68962814
10 day(s) RMSE                     : 9.53237218
10 day(s) R2                       : -0.07677614
10 day(s) Pearson r                : 0.03939701
10 day(s) QLIKE                    : 0.41076681
20 day(s) MAE                      : 4.77487864
20 day(s) RMSE                     : 9.54526459
20 day(s) R2                       : -0.08007218
20 day(s) Pearson r                : -0.00372882
20 day(s) QLIKE                    : 0.41913431
full horizon MAE                   : 4.77487864
full horizon RMSE                  : 9.54526459
full horizon R2                    : -0.08007218
full horizon Pearson r             : -0.00372882
full horizon QLIKE                 : 0.41913431

--- Task 2 ---
1 day(s) MAE                       : 0.14746084
1 day(s) RMSE                      : 0.25347221
1 day(s) R2                        : -0.20843540
1 day(s) Pearson r                 : -0.14340819
1 day(s) QLIKE                     : 18.35615076
3 day(s) MAE                       : 0.13851570
3 day(s) RMSE                      : 0.24445384
3 day(s) R2                        : -0.12402929
3 day(s) Pearson r                 : -0.12626551
3 day(s) QLIKE                     : 18.41860985
5 day(s) MAE                       : 0.13841744
5 day(s) RMSE                      : 0.24409730
5 day(s) R2                        : -0.12079798
5 day(s) Pearson r                 : -0.11062499
5 day(s) QLIKE                     : 17.98306682
10 day(s) MAE                      : 0.14613127
10 day(s) RMSE                     : 0.24557806
10 day(s) R2                       : -0.13454253
10 day(s) Pearson r                : -0.10626370
10 day(s) QLIKE                    : 17.47893326
20 day(s) MAE                      : 0.15061372
20 day(s) RMSE                     : 0.24428354
20 day(s) R2                       : -0.12284490
20 day(s) Pearson r                : -0.06740803
20 day(s) QLIKE                    : 16.94269192
full horizon MAE                   : 0.15061372
full horizon RMSE                  : 0.24428354
full horizon R2                    : -0.12284490
full horizon Pearson r             : -0.06740803
full horizon QLIKE                 : 16.94269192

--- Task 3 ---
1 day(s) MAE                       : 0.25643819
1 day(s) RMSE                      : 0.33656749
1 day(s) R2                        : -1.05211948
1 day(s) Pearson r                 : -0.07660723
1 day(s) QLIKE                     : 0.19377279
3 day(s) MAE                       : 0.25723074
3 day(s) RMSE                      : 0.33881584
3 day(s) R2                        : -1.08054356
3 day(s) Pearson r                 : -0.08040417
3 day(s) QLIKE                     : 0.19613411
5 day(s) MAE                       : 0.25896326
5 day(s) RMSE                      : 0.34006289
5 day(s) R2                        : -1.09677873
5 day(s) Pearson r                 : -0.08333522
5 day(s) QLIKE                     : 0.19630275
10 day(s) MAE                      : 0.25863980
10 day(s) RMSE                     : 0.33745669
10 day(s) R2                       : -1.06693621
10 day(s) Pearson r                : -0.06945720
10 day(s) QLIKE                    : 0.19302514
20 day(s) MAE                      : 0.25317736
20 day(s) RMSE                     : 0.32339567
20 day(s) R2                       : -0.90239125
20 day(s) Pearson r                : -0.03419494
20 day(s) QLIKE                    : 0.18031805
full horizon MAE                   : 0.25317736
full horizon RMSE                  : 0.32339567
full horizon R2                    : -0.90239125
full horizon Pearson r             : -0.03419494
full horizon QLIKE                 : 0.18031805

--- Task 4 ---
1 day(s) MAE                       : 1.29526450
1 day(s) RMSE                      : 2.37135818
1 day(s) R2                        : -0.12876635
1 day(s) Pearson r                 : -0.12955965
1 day(s) QLIKE                     : 1.58490220
3 day(s) MAE                       : 1.29444395
3 day(s) RMSE                      : 2.36877506
3 day(s) R2                        : -0.12674561
3 day(s) Pearson r                 : -0.12796194
3 day(s) QLIKE                     : 1.57249457
5 day(s) MAE                       : 1.29806179
5 day(s) RMSE                      : 2.36664622
5 day(s) R2                        : -0.12515795
5 day(s) Pearson r                 : -0.12868792
5 day(s) QLIKE                     : 1.55922555
10 day(s) MAE                      : 1.30924986
10 day(s) RMSE                     : 2.36444244
10 day(s) R2                       : -0.12415098
10 day(s) Pearson r                : -0.12663820
10 day(s) QLIKE                    : 1.52667806
20 day(s) MAE                      : 1.29086009
20 day(s) RMSE                     : 2.33703009
20 day(s) R2                       : -0.10030993
20 day(s) Pearson r                : -0.08501849
20 day(s) QLIKE                    : 1.41905273
full horizon MAE                   : 1.29086009
full horizon RMSE                  : 2.33703009
full horizon R2                    : -0.10030993
full horizon Pearson r             : -0.08501849
full horizon QLIKE                 : 1.41905273

--- Task 5 ---
1 day(s) MAE                       : 0.10187994
1 day(s) RMSE                      : 0.11602293
1 day(s) R2                        : -3.36764941
1 day(s) Pearson r                 : 0.20395976
1 day(s) QLIKE                     : 3.15644154
3 day(s) MAE                       : 0.10191975
3 day(s) RMSE                      : 0.11604865
3 day(s) R2                        : -3.37269909
3 day(s) Pearson r                 : 0.17569493
3 day(s) QLIKE                     : 3.25236781
5 day(s) MAE                       : 0.10196102
5 day(s) RMSE                      : 0.11607377
5 day(s) R2                        : -3.37826079
5 day(s) Pearson r                 : 0.17036797
5 day(s) QLIKE                     : 3.29776166
10 day(s) MAE                      : 0.10207101
10 day(s) RMSE                     : 0.11613962
10 day(s) R2                       : -3.39348698
10 day(s) Pearson r                : 0.15683252
10 day(s) QLIKE                    : 3.30539949
20 day(s) MAE                      : 0.10228001
20 day(s) RMSE                     : 0.11627587
20 day(s) R2                       : -3.41977375
20 day(s) Pearson r                : 0.12395752
20 day(s) QLIKE                    : 3.27328180
full horizon MAE                   : 0.10228001
full horizon RMSE                  : 0.11627587
full horizon R2                    : -3.41977375
full horizon Pearson r             : 0.12395752
full horizon QLIKE                 : 3.27328180

--- Task 6 ---
1 day(s) MAE                       : 1.03753126
1 day(s) RMSE                      : 1.29762998
1 day(s) R2                        : -0.91797259
1 day(s) Pearson r                 : -0.16224976
1 day(s) QLIKE                     : 0.07244159
3 day(s) MAE                       : 1.04247506
3 day(s) RMSE                      : 1.29896350
3 day(s) R2                        : -0.92332515
3 day(s) Pearson r                 : -0.16677505
3 day(s) QLIKE                     : 0.07253204
5 day(s) MAE                       : 1.04645372
5 day(s) RMSE                      : 1.30064462
5 day(s) R2                        : -0.92926773
5 day(s) Pearson r                 : -0.16132101
5 day(s) QLIKE                     : 0.07244349
10 day(s) MAE                      : 1.05279205
10 day(s) RMSE                     : 1.30687445
10 day(s) R2                       : -0.95188108
10 day(s) Pearson r                : -0.16648684
10 day(s) QLIKE                    : 0.07243477
20 day(s) MAE                      : 1.06404476
20 day(s) RMSE                     : 1.31115429
20 day(s) R2                       : -0.97429683
20 day(s) Pearson r                : -0.15729512
20 day(s) QLIKE                    : 0.07181331
full horizon MAE                   : 1.06404476
full horizon RMSE                  : 1.31115429
full horizon R2                    : -0.97429683
full horizon Pearson r             : -0.15729512
full horizon QLIKE                 : 0.07181331

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_H20.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=2.77705, max=13.9634

=== BTCUSDT | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 2399
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (1727, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  27.572355514245807
  Min value:  -0.39529821705307827
Checking X_time_train_core:
Shape: (1727, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.8178490491226165
Checking y_train_core (log_mse scaled):
Shape: (1727, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.564043013270618
  Min value:  -20.83818392077683
Checking X_price_val:
Shape: (192, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.1802886452741648
  Min value:  -0.3973706893292718
Checking X_time_val:
Shape: (192, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_val (log_mse scaled):
Shape: (192, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3500532890780494
  Min value:  -4.25835726179007
Checking X_price_test:
Shape: (480, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.92735513679459
  Min value:  -0.3866725144992877
Checking X_time_test:
Shape: (480, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.510852258333365
  Min value:  -1.5280954648836782
Checking y_test (log_mse scaled):
Shape: (480, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5862887708961093
  Min value:  -109.4415927913587
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 164s 528ms/step - loss: 0.7461 - val_loss: 2.4542 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 20s 374ms/step - loss: 0.3970 - val_loss: 2.5640 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 25s 468ms/step - loss: 0.3517 - val_loss: 2.9173 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 480ms/step - loss: 0.3344 - val_loss: 2.0586 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 476ms/step - loss: 0.3190 - val_loss: 1.7564 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 27s 499ms/step - loss: 0.3091 - val_loss: 1.9950 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 489ms/step - loss: 0.2977 - val_loss: 1.7651 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 27s 506ms/step - loss: 0.2847 - val_loss: 1.8764 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 27s 495ms/step - loss: 0.2747 - val_loss: 1.7102 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 27s 504ms/step - loss: 0.2742 - val_loss: 1.8558 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 27s 500ms/step - loss: 0.2597 - val_loss: 1.8159 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 476ms/step - loss: 0.2576 - val_loss: 1.5302 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 486ms/step - loss: 0.2515 - val_loss: 1.4402 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 27s 501ms/step - loss: 0.2464 - val_loss: 1.5442 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 487ms/step - loss: 0.2463 - val_loss: 1.5399 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=16 tf_ratio=0.211 -> TF=ON
Epoch 16/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 29s 535ms/step - loss: 0.2418 - val_loss: 1.6704 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=17 tf_ratio=0.158 -> TF=ON
Epoch 17/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 26s 482ms/step - loss: 0.2331 - val_loss: 5.5578 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=18 tf_ratio=0.105 -> TF=ON
Epoch 18/20
54/54 ━━━━━━━━━━━━━━━━━━━━ 0s 507ms/step - loss: 0.2127
Epoch 18: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
54/54 ━━━━━━━━━━━━━━━━━━━━ 28s 521ms/step - loss: 0.2275 - val_loss: 2.3629 - learning_rate: 5.0000e-04
Epoch 18: early stopping
Restoring model weights from the end of the best epoch: 13.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 5.10259546
1 day(s) RMSE                      : 9.14681509
1 day(s) R2                        : 0.00850214
1 day(s) Pearson r                 : 0.31565626
1 day(s) QLIKE                     : 0.37565575
3 day(s) MAE                       : 7.15673213
3 day(s) RMSE                      : 10.13450040
3 day(s) R2                        : -0.21701657
3 day(s) Pearson r                 : 0.01720221
3 day(s) QLIKE                     : 0.40795743
5 day(s) MAE                       : 6.21777157
5 day(s) RMSE                      : 9.79472430
5 day(s) R2                        : -0.13713597
5 day(s) Pearson r                 : 0.01155481
5 day(s) QLIKE                     : 0.42465603
10 day(s) MAE                      : 5.45924871
10 day(s) RMSE                     : 9.76158995
10 day(s) R2                       : -0.12918361
10 day(s) Pearson r                : 0.00698171
10 day(s) QLIKE                    : 0.45722184
20 day(s) MAE                      : 5.05987316
20 day(s) RMSE                     : 9.71482561
20 day(s) R2                       : -0.11878556
20 day(s) Pearson r                : 0.00459797
20 day(s) QLIKE                    : 0.42478437
full horizon MAE                   : 5.05987316
full horizon RMSE                  : 9.71482561
full horizon R2                    : -0.11878556
full horizon Pearson r             : 0.00459797
full horizon QLIKE                 : 0.42478437

--- Task 2 ---
1 day(s) MAE                       : 0.09371143
1 day(s) RMSE                      : 0.24754012
1 day(s) R2                        : -0.15253448
1 day(s) Pearson r                 : 0.15218284
1 day(s) QLIKE                     : 3.38602540
3 day(s) MAE                       : 0.09713229
3 day(s) RMSE                      : 0.24952482
3 day(s) R2                        : -0.17114703
3 day(s) Pearson r                 : 0.00157275
3 day(s) QLIKE                     : 5.97830659
5 day(s) MAE                       : 0.09883997
5 day(s) RMSE                      : 0.25046731
5 day(s) R2                        : -0.18005832
5 day(s) Pearson r                 : 0.00087634
5 day(s) QLIKE                     : 11.94781212
10 day(s) MAE                      : 0.10014944
10 day(s) RMSE                     : 0.25117682
10 day(s) R2                       : -0.18686349
10 day(s) Pearson r                : 0.00045363
10 day(s) QLIKE                    : 10.33221574
20 day(s) MAE                      : 0.10087653
20 day(s) RMSE                     : 0.25154310
20 day(s) R2                       : -0.19057336
20 day(s) Pearson r                : 0.00017224
20 day(s) QLIKE                    : 6.92043695
full horizon MAE                   : 0.10087653
full horizon RMSE                  : 0.25154310
full horizon R2                    : -0.19057336
full horizon Pearson r             : 0.00017224
full horizon QLIKE                 : 6.92043695

--- Task 3 ---
1 day(s) MAE                       : 0.25659222
1 day(s) RMSE                      : 0.28225921
1 day(s) R2                        : -0.44329320
1 day(s) Pearson r                 : -0.13825827
1 day(s) QLIKE                     : 0.13957354
3 day(s) MAE                       : 0.54629008
3 day(s) RMSE                      : 0.61961124
3 day(s) R2                        : -5.95805697
3 day(s) Pearson r                 : 0.00059691
3 day(s) QLIKE                     : 2.53628716
5 day(s) MAE                       : 0.60400579
5 day(s) RMSE                      : 0.66110458
5 day(s) R2                        : -6.92454705
5 day(s) Pearson r                 : 0.00082776
5 day(s) QLIKE                     : 2.31435183
10 day(s) MAE                      : 0.68004622
10 day(s) RMSE                     : 0.72805693
10 day(s) R2                       : -8.62103207
10 day(s) Pearson r                : 0.00172133
10 day(s) QLIKE                    : 1.66904781
20 day(s) MAE                      : 0.70459246
20 day(s) RMSE                     : 0.74457766
20 day(s) R2                       : -9.08443192
20 day(s) Pearson r                : 0.00218183
20 day(s) QLIKE                    : 1.23766548
full horizon MAE                   : 0.70459246
full horizon RMSE                  : 0.74457766
full horizon R2                    : -9.08443192
full horizon Pearson r             : 0.00218183
full horizon QLIKE                 : 1.23766548

--- Task 4 ---
1 day(s) MAE                       : 0.92325482
1 day(s) RMSE                      : 2.37794651
1 day(s) R2                        : -0.13504716
1 day(s) Pearson r                 : 0.13099576
1 day(s) QLIKE                     : 1.21119054
3 day(s) MAE                       : 0.91763702
3 day(s) RMSE                      : 2.36656122
3 day(s) R2                        : -0.12464050
3 day(s) Pearson r                 : 0.00514399
3 day(s) QLIKE                     : 1.21257725
5 day(s) MAE                       : 0.91242053
5 day(s) RMSE                      : 2.35348467
5 day(s) R2                        : -0.11267814
5 day(s) Pearson r                 : 0.00149693
5 day(s) QLIKE                     : 1.22470247
10 day(s) MAE                      : 0.92082094
10 day(s) RMSE                     : 2.32661983
10 day(s) R2                       : -0.08847386
10 day(s) Pearson r                : 0.00083829
10 day(s) QLIKE                    : 1.25033675
20 day(s) MAE                      : 0.92537302
20 day(s) RMSE                     : 2.34845076
20 day(s) R2                       : -0.11109027
20 day(s) Pearson r                : -0.00565711
20 day(s) QLIKE                    : 1.24572087
full horizon MAE                   : 0.92537302
full horizon RMSE                  : 2.34845076
full horizon R2                    : -0.11109027
full horizon Pearson r             : -0.00565711
full horizon QLIKE                 : 1.24572087

--- Task 5 ---
1 day(s) MAE                       : 0.10188527
1 day(s) RMSE                      : 0.11602869
1 day(s) R2                        : -3.36808357
1 day(s) Pearson r                 : -0.00359972
1 day(s) QLIKE                     : 0.33878841
3 day(s) MAE                       : 0.10163868
3 day(s) RMSE                      : 0.11580003
3 day(s) R2                        : -3.35398329
3 day(s) Pearson r                 : -0.02029935
3 day(s) QLIKE                     : 2.74290528
5 day(s) MAE                       : 0.10090217
5 day(s) RMSE                      : 0.11513583
5 day(s) R2                        : -3.30778866
5 day(s) Pearson r                 : -0.00121892
5 day(s) QLIKE                     : 4.00555613
10 day(s) MAE                      : 0.10097006
10 day(s) RMSE                     : 0.11516131
10 day(s) R2                       : -3.31978149
10 day(s) Pearson r                : 0.00774951
10 day(s) QLIKE                    : 4.07293606
20 day(s) MAE                      : 0.10153976
20 day(s) RMSE                     : 0.11561630
20 day(s) R2                       : -3.36977441
20 day(s) Pearson r                : 0.00741462
20 day(s) QLIKE                    : 3.46614341
full horizon MAE                   : 0.10153976
full horizon RMSE                  : 0.11561630
full horizon R2                    : -3.36977441
full horizon Pearson r             : 0.00741462
full horizon QLIKE                 : 3.46614341

--- Task 6 ---
1 day(s) MAE                       : 0.81624705
1 day(s) RMSE                      : 0.95341311
1 day(s) R2                        : -0.03538745
1 day(s) Pearson r                 : -0.06734122
1 day(s) QLIKE                     : 0.06349578
3 day(s) MAE                       : 0.82266390
3 day(s) RMSE                      : 0.96995995
3 day(s) R2                        : -0.07242375
3 day(s) Pearson r                 : -0.00007699
3 day(s) QLIKE                     : 0.06357716
5 day(s) MAE                       : 0.83209753
5 day(s) RMSE                      : 0.99073601
5 day(s) R2                        : -0.11941493
5 day(s) Pearson r                 : -0.00126576
5 day(s) QLIKE                     : 0.06382186
10 day(s) MAE                      : 0.86018390
10 day(s) RMSE                     : 1.04142659
10 day(s) R2                       : -0.23948978
10 day(s) Pearson r                : -0.00550528
10 day(s) QLIKE                    : 0.06448723
20 day(s) MAE                      : 0.86802675
20 day(s) RMSE                     : 1.05116132
20 day(s) R2                       : -0.26894731
20 day(s) Pearson r                : -0.00567925
20 day(s) QLIKE                    : 0.06385976
full horizon MAE                   : 0.86802675
full horizon RMSE                  : 1.05116132
full horizon R2                    : -0.26894731
full horizon Pearson r             : -0.00567925
full horizon QLIKE                 : 0.06385976

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/BTCUSDT/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.505764, max=131.49
Saved y_pred min=3.79999, max=14.8047

=== EURUSD | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.747719247380912
  Min value:  -3.718602223017602
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642507
  Min value:  -2.9759282116206096
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316087
  Min value:  -5.3499699676080334
Epoch 001 | phase=1 | train_loss=4.9491 | val_main=1.189717
Epoch 002 | phase=1 | train_loss=4.5831 | val_main=1.188764
Epoch 003 | phase=1 | train_loss=4.4385 | val_main=1.188539
Epoch 004 | phase=1 | train_loss=4.2795 | val_main=1.188661
Epoch 005 | phase=1 | train_loss=4.1004 | val_main=1.188490
Epoch 006 | phase=1 | train_loss=3.9730 | val_main=1.188741
Epoch 007 | phase=1 | train_loss=3.8455 | val_main=1.189212
Epoch 008 | phase=1 | train_loss=3.7594 | val_main=1.189379
Epoch 009 | phase=1 | train_loss=3.5401 | val_main=1.189346
Epoch 010 | phase=1 | train_loss=3.4324 | val_main=1.189356
Epoch 011 | phase=1 | train_loss=3.3489 | val_main=1.189455
Epoch 012 | phase=1 | train_loss=3.2178 | val_main=1.189419
Epoch 013 | phase=1 | train_loss=3.0417 | val_main=1.189316
Epoch 014 | phase=1 | train_loss=2.9450 | val_main=1.189335
Epoch 015 | phase=1 | train_loss=2.8781 | val_main=1.189283
Epoch 016 | phase=0 | train_loss=3.6702 | val_main=0.826157
Epoch 017 | phase=0 | train_loss=3.3364 | val_main=0.575484
Epoch 018 | phase=0 | train_loss=2.9042 | val_main=0.427556
Epoch 019 | phase=0 | train_loss=2.6866 | val_main=0.419603
Epoch 020 | phase=0 | train_loss=2.5815 | val_main=0.409484
Epoch 021 | phase=0 | train_loss=2.4291 | val_main=0.412038
Epoch 022 | phase=0 | train_loss=2.3013 | val_main=0.376453
Epoch 023 | phase=0 | train_loss=2.2064 | val_main=0.372456
Epoch 024 | phase=0 | train_loss=2.0367 | val_main=0.380145
Epoch 025 | phase=0 | train_loss=1.8998 | val_main=0.357460
Epoch 026 | phase=0 | train_loss=1.8025 | val_main=0.359772
Epoch 027 | phase=0 | train_loss=1.7061 | val_main=0.355926
Epoch 028 | phase=0 | train_loss=1.6477 | val_main=0.349449
Epoch 029 | phase=0 | train_loss=1.5600 | val_main=0.350542
Epoch 030 | phase=0 | train_loss=1.4152 | val_main=0.344691
Epoch 031 | phase=2 | train_loss=0.1441 | val_main=0.362259
Epoch 032 | phase=2 | train_loss=0.1400 | val_main=0.343762
Epoch 033 | phase=2 | train_loss=0.1229 | val_main=0.359692
Epoch 034 | phase=2 | train_loss=0.1167 | val_main=0.369957
Epoch 035 | phase=2 | train_loss=0.1099 | val_main=0.378125
Epoch 036 | phase=2 | train_loss=0.1009 | val_main=0.393970
Epoch 037 | phase=2 | train_loss=0.0959 | val_main=0.383327
Epoch 038 | phase=2 | train_loss=0.0906 | val_main=0.406622
Epoch 039 | phase=2 | train_loss=0.0852 | val_main=0.402736
Epoch 040 | phase=2 | train_loss=0.0782 | val_main=0.422376
Epoch 041 | phase=2 | train_loss=0.0731 | val_main=0.424579
Epoch 042 | phase=2 | train_loss=0.0711 | val_main=0.429025
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08815789
1 day(s) RMSE                      : 0.17309408
1 day(s) R2                        : 0.34357247
1 day(s) Pearson r                 : 0.59980755
1 day(s) QLIKE                     : 0.42895226
full horizon MAE                   : 0.08815789
full horizon RMSE                  : 0.17309408
full horizon R2                    : 0.34357247
full horizon Pearson r             : 0.59980755
full horizon QLIKE                 : 0.42895226

--- Task 2 ---
1 day(s) MAE                       : 0.03443979
1 day(s) RMSE                      : 0.05443198
1 day(s) R2                        : -0.50406138
1 day(s) Pearson r                 : -0.04452532
1 day(s) QLIKE                     : 19.22167861
full horizon MAE                   : 0.03443979
full horizon RMSE                  : 0.05443198
full horizon R2                    : -0.50406138
full horizon Pearson r             : -0.04452532
full horizon QLIKE                 : 19.22167861

--- Task 3 ---
1 day(s) MAE                       : 0.82175664
1 day(s) RMSE                      : 1.24762956
1 day(s) R2                        : -10.52396334
1 day(s) Pearson r                 : -0.01867348
1 day(s) QLIKE                     : 23.54696320
full horizon MAE                   : 0.82175664
full horizon RMSE                  : 1.24762956
full horizon R2                    : -10.52396334
full horizon Pearson r             : -0.01867348
full horizon QLIKE                 : 23.54696320

--- Task 4 ---
1 day(s) MAE                       : 0.03953752
1 day(s) RMSE                      : 0.06383919
1 day(s) R2                        : -0.45874167
1 day(s) Pearson r                 : -0.10693423
1 day(s) QLIKE                     : 14.12030956
full horizon MAE                   : 0.03953752
full horizon RMSE                  : 0.06383919
full horizon R2                    : -0.45874167
full horizon Pearson r             : -0.10693423
full horizon QLIKE                 : 14.12030956

--- Task 5 ---
1 day(s) MAE                       : 0.00880799
1 day(s) RMSE                      : 0.01389087
1 day(s) R2                        : -4.56074994
1 day(s) Pearson r                 : 0.21069232
1 day(s) QLIKE                     : 17.38174351
full horizon MAE                   : 0.00880799
full horizon RMSE                  : 0.01389087
full horizon R2                    : -4.56074994
full horizon Pearson r             : 0.21069232
full horizon QLIKE                 : 17.38174351

--- Task 6 ---
1 day(s) MAE                       : 1.18041513
1 day(s) RMSE                      : 3.52821210
1 day(s) R2                        : -7.14670575
1 day(s) Pearson r                 : 0.17864742
1 day(s) QLIKE                     : 0.03015396
full horizon MAE                   : 1.18041513
full horizon RMSE                  : 3.52821210
full horizon R2                    : -7.14670575
full horizon Pearson r             : 0.17864742
full horizon QLIKE                 : 0.03015396

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_H1.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00224037, max=1.21855

=== EURUSD | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.747719247380912
  Min value:  -3.718602223017602
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.319059579642507
  Min value:  -2.9759282116206096
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3834322546316087
  Min value:  -5.3499699676080334
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 150s 294ms/step - loss: 0.9784 - val_loss: 1.0328 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 16s 184ms/step - loss: 0.9266 - val_loss: 0.8935 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 197ms/step - loss: 0.9300 - val_loss: 0.8793 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 194ms/step - loss: 0.9022 - val_loss: 0.8725 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 203ms/step - loss: 0.8984 - val_loss: 0.8811 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 204ms/step - loss: 0.8853 - val_loss: 0.8954 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 199ms/step - loss: 0.8764 - val_loss: 0.8727 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 201ms/step - loss: 0.8937 - val_loss: 1.0827 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 201ms/step - loss: 0.9277 - val_loss: 0.8658 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 200ms/step - loss: 0.9094 - val_loss: 0.8745 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 202ms/step - loss: 0.9004 - val_loss: 0.8659 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 202ms/step - loss: 0.8929 - val_loss: 0.8632 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=13 tf_ratio=0.368 -> TF=ON
Epoch 13/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 17s 201ms/step - loss: 0.8849 - val_loss: 0.8632 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=14 tf_ratio=0.316 -> TF=ON
Epoch 14/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 19s 225ms/step - loss: 0.8791 - val_loss: 0.8646 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=15 tf_ratio=0.263 -> TF=ON
Epoch 15/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 18s 212ms/step - loss: 0.8780 - val_loss: 0.8649 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=16 tf_ratio=0.211 -> TF=ON
Epoch 16/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 18s 213ms/step - loss: 0.8686 - val_loss: 0.8700 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=17 tf_ratio=0.158 -> TF=ON
Epoch 17/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 0s 206ms/step - loss: 0.8813
Epoch 17: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
86/86 ━━━━━━━━━━━━━━━━━━━━ 18s 212ms/step - loss: 0.8743 - val_loss: 1.0731 - learning_rate: 5.0000e-04
Epoch 17: early stopping
Restoring model weights from the end of the best epoch: 12.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.10766944
1 day(s) RMSE                      : 0.20219395
1 day(s) R2                        : 0.10430812
1 day(s) Pearson r                 : 0.41305027
1 day(s) QLIKE                     : 0.48990247
full horizon MAE                   : 0.10766944
full horizon RMSE                  : 0.20219395
full horizon R2                    : 0.10430812
full horizon Pearson r             : 0.41305027
full horizon QLIKE                 : 0.48990247

--- Task 2 ---
1 day(s) MAE                       : 0.03446843
1 day(s) RMSE                      : 0.05584583
1 day(s) R2                        : -0.58321144
1 day(s) Pearson r                 : -0.08214266
1 day(s) QLIKE                     : 13.67497443
full horizon MAE                   : 0.03446843
full horizon RMSE                  : 0.05584583
full horizon R2                    : -0.58321144
full horizon Pearson r             : -0.08214266
full horizon QLIKE                 : 13.67497443

--- Task 3 ---
1 day(s) MAE                       : 0.64213141
1 day(s) RMSE                      : 0.73495240
1 day(s) R2                        : -2.99897774
1 day(s) Pearson r                 : 0.18658454
1 day(s) QLIKE                     : 3.65706645
full horizon MAE                   : 0.64213141
full horizon RMSE                  : 0.73495240
full horizon R2                    : -2.99897774
full horizon Pearson r             : 0.18658454
full horizon QLIKE                 : 3.65706645

--- Task 4 ---
1 day(s) MAE                       : 0.03256461
1 day(s) RMSE                      : 0.06170885
1 day(s) R2                        : -0.36300846
1 day(s) Pearson r                 : -0.19075945
1 day(s) QLIKE                     : 6.23364200
full horizon MAE                   : 0.03256461
full horizon RMSE                  : 0.06170885
full horizon R2                    : -0.36300846
full horizon Pearson r             : -0.19075945
full horizon QLIKE                 : 6.23364200

--- Task 5 ---
1 day(s) MAE                       : 0.00512760
1 day(s) RMSE                      : 0.00780969
1 day(s) R2                        : -0.75768994
1 day(s) Pearson r                 : 0.41595537
1 day(s) QLIKE                     : 13.59205659
full horizon MAE                   : 0.00512760
full horizon RMSE                  : 0.00780969
full horizon R2                    : -0.75768994
full horizon Pearson r             : 0.41595537
full horizon QLIKE                 : 13.59205659

--- Task 6 ---
1 day(s) MAE                       : 1.31008082
1 day(s) RMSE                      : 1.61721115
1 day(s) R2                        : -0.71161511
1 day(s) Pearson r                 : -0.24099428
1 day(s) QLIKE                     : 0.04352173
full horizon MAE                   : 1.31008082
full horizon RMSE                  : 1.61721115
full horizon R2                    : -0.71161511
full horizon Pearson r             : -0.24099428
full horizon QLIKE                 : 0.04352173

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_LSTM_H1.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0522551, max=0.272158

=== EURUSD | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.747698926836269
  Min value:  -3.7204515821714925
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3194271117573377
  Min value:  -2.977504877870329
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3838234229198108
  Min value:  -5.352418327671095
Epoch 001 | phase=1 | train_loss=4.9940 | val_main=1.203375
Epoch 002 | phase=1 | train_loss=4.6248 | val_main=1.202117
Epoch 003 | phase=1 | train_loss=4.4663 | val_main=1.202000
Epoch 004 | phase=1 | train_loss=4.3465 | val_main=1.202120
Epoch 005 | phase=1 | train_loss=4.2458 | val_main=1.201796
Epoch 006 | phase=1 | train_loss=4.1529 | val_main=1.201829
Epoch 007 | phase=1 | train_loss=3.9906 | val_main=1.201987
Epoch 008 | phase=1 | train_loss=3.9169 | val_main=1.201990
Epoch 009 | phase=1 | train_loss=3.7260 | val_main=1.201890
Epoch 010 | phase=1 | train_loss=3.6142 | val_main=1.202271
Epoch 011 | phase=1 | train_loss=3.4321 | val_main=1.202025
Epoch 012 | phase=1 | train_loss=3.3184 | val_main=1.202177
Epoch 013 | phase=1 | train_loss=3.0951 | val_main=1.202007
Epoch 014 | phase=1 | train_loss=2.9953 | val_main=1.201985
Epoch 015 | phase=1 | train_loss=2.8475 | val_main=1.202243
Epoch 016 | phase=0 | train_loss=3.6638 | val_main=0.965766
Epoch 017 | phase=0 | train_loss=3.4534 | val_main=0.886018
Epoch 018 | phase=0 | train_loss=3.2850 | val_main=0.820569
Epoch 019 | phase=0 | train_loss=3.1268 | val_main=0.714860
Epoch 020 | phase=0 | train_loss=2.9417 | val_main=0.598589
Epoch 021 | phase=0 | train_loss=2.7258 | val_main=0.529892
Epoch 022 | phase=0 | train_loss=2.6052 | val_main=0.433917
Epoch 023 | phase=0 | train_loss=2.4820 | val_main=0.389224
Epoch 024 | phase=0 | train_loss=2.3154 | val_main=0.382612
Epoch 025 | phase=0 | train_loss=2.1660 | val_main=0.361810
Epoch 026 | phase=0 | train_loss=2.0976 | val_main=0.359725
Epoch 027 | phase=0 | train_loss=2.0313 | val_main=0.353338
Epoch 028 | phase=0 | train_loss=1.9753 | val_main=0.342450
Epoch 029 | phase=0 | train_loss=1.9022 | val_main=0.348529
Epoch 030 | phase=0 | train_loss=1.8952 | val_main=0.345696
Epoch 031 | phase=2 | train_loss=0.2066 | val_main=0.346874
Epoch 032 | phase=2 | train_loss=0.2035 | val_main=0.345580
Epoch 033 | phase=2 | train_loss=0.1911 | val_main=0.347458
Epoch 034 | phase=2 | train_loss=0.1886 | val_main=0.346024
Epoch 035 | phase=2 | train_loss=0.1805 | val_main=0.349525
Epoch 036 | phase=2 | train_loss=0.1728 | val_main=0.356478
Epoch 037 | phase=2 | train_loss=0.1678 | val_main=0.349051
Epoch 038 | phase=2 | train_loss=0.1614 | val_main=0.354963
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08836400
1 day(s) RMSE                      : 0.17753344
1 day(s) R2                        : 0.30946981
1 day(s) Pearson r                 : 0.57055601
1 day(s) QLIKE                     : 0.36900157
3 day(s) MAE                       : 0.09096749
3 day(s) RMSE                      : 0.18251161
3 day(s) R2                        : 0.26457066
3 day(s) Pearson r                 : 0.52865547
3 day(s) QLIKE                     : 0.39110105
5 day(s) MAE                       : 0.09204536
5 day(s) RMSE                      : 0.18405891
5 day(s) R2                        : 0.24354871
5 day(s) Pearson r                 : 0.50923234
5 day(s) QLIKE                     : 0.40632532
full horizon MAE                   : 0.09204536
full horizon RMSE                  : 0.18405891
full horizon R2                    : 0.24354871
full horizon Pearson r             : 0.50923234
full horizon QLIKE                 : 0.40632532

--- Task 2 ---
1 day(s) MAE                       : 0.03469110
1 day(s) RMSE                      : 0.05518527
1 day(s) R2                        : -0.54597916
1 day(s) Pearson r                 : -0.08075569
1 day(s) QLIKE                     : 19.25089995
3 day(s) MAE                       : 0.03455853
3 day(s) RMSE                      : 0.05499603
3 day(s) R2                        : -0.53500736
3 day(s) Pearson r                 : -0.06903299
3 day(s) QLIKE                     : 19.21869650
5 day(s) MAE                       : 0.03453898
5 day(s) RMSE                      : 0.05503580
5 day(s) R2                        : -0.53619438
5 day(s) Pearson r                 : -0.05674600
5 day(s) QLIKE                     : 19.18286984
full horizon MAE                   : 0.03453898
full horizon RMSE                  : 0.05503580
full horizon R2                    : -0.53619438
full horizon Pearson r             : -0.05674600
full horizon QLIKE                 : 19.18286984

--- Task 3 ---
1 day(s) MAE                       : 0.56990036
1 day(s) RMSE                      : 0.78482498
1 day(s) R2                        : -3.56011921
1 day(s) Pearson r                 : -0.00184255
1 day(s) QLIKE                     : 12.09273454
3 day(s) MAE                       : 0.60823744
3 day(s) RMSE                      : 0.88553489
3 day(s) R2                        : -4.80492185
3 day(s) Pearson r                 : -0.02649008
3 day(s) QLIKE                     : 12.89113484
5 day(s) MAE                       : 0.59794769
5 day(s) RMSE                      : 0.86918115
5 day(s) R2                        : -4.59121664
5 day(s) Pearson r                 : -0.01115928
5 day(s) QLIKE                     : 12.82983336
full horizon MAE                   : 0.59794769
full horizon RMSE                  : 0.86918115
full horizon R2                    : -4.59121664
full horizon Pearson r             : -0.01115928
full horizon QLIKE                 : 12.82983336

--- Task 4 ---
1 day(s) MAE                       : 0.03445251
1 day(s) RMSE                      : 0.06055503
1 day(s) R2                        : -0.31251417
1 day(s) Pearson r                 : -0.15387042
1 day(s) QLIKE                     : 10.83778798
3 day(s) MAE                       : 0.03502128
3 day(s) RMSE                      : 0.06066420
3 day(s) R2                        : -0.31705271
3 day(s) Pearson r                 : -0.16834435
3 day(s) QLIKE                     : 10.89924912
5 day(s) MAE                       : 0.03525375
5 day(s) RMSE                      : 0.06068626
5 day(s) R2                        : -0.31765357
5 day(s) Pearson r                 : -0.17277686
5 day(s) QLIKE                     : 10.87843544
full horizon MAE                   : 0.03525375
full horizon RMSE                  : 0.06068626
full horizon R2                    : -0.31765357
full horizon Pearson r             : -0.17277686
full horizon QLIKE                 : 10.87843544

--- Task 5 ---
1 day(s) MAE                       : 0.00680682
1 day(s) RMSE                      : 0.01016224
1 day(s) R2                        : -1.97614131
1 day(s) Pearson r                 : 0.15917150
1 day(s) QLIKE                     : 16.50622822
3 day(s) MAE                       : 0.00677319
3 day(s) RMSE                      : 0.01001334
3 day(s) R2                        : -1.88956359
3 day(s) Pearson r                 : 0.14623142
3 day(s) QLIKE                     : 16.59934776
5 day(s) MAE                       : 0.00675100
5 day(s) RMSE                      : 0.00995291
5 day(s) R2                        : -1.85479179
5 day(s) Pearson r                 : 0.13780777
5 day(s) QLIKE                     : 16.62424435
full horizon MAE                   : 0.00675100
full horizon RMSE                  : 0.00995291
full horizon R2                    : -1.85479179
full horizon Pearson r             : 0.13780777
full horizon QLIKE                 : 16.62424435

--- Task 6 ---
1 day(s) MAE                       : 1.39770881
1 day(s) RMSE                      : 3.50928164
1 day(s) R2                        : -7.05951874
1 day(s) Pearson r                 : 0.16049505
1 day(s) QLIKE                     : 0.04080721
3 day(s) MAE                       : 1.38560642
3 day(s) RMSE                      : 3.52456736
3 day(s) R2                        : -7.12701464
3 day(s) Pearson r                 : 0.15534830
3 day(s) QLIKE                     : 0.04036534
5 day(s) MAE                       : 1.38399784
5 day(s) RMSE                      : 3.57450792
5 day(s) R2                        : -7.35452717
5 day(s) Pearson r                 : 0.15575666
5 day(s) QLIKE                     : 0.04021022
full horizon MAE                   : 1.38399784
full horizon RMSE                  : 3.57450792
full horizon R2                    : -7.35452717
full horizon Pearson r             : 0.15575666
full horizon QLIKE                 : 0.04021022

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_H5.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00295811, max=1.07163

=== EURUSD | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.747698926836269
  Min value:  -3.7204515821714925
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3194271117573377
  Min value:  -2.977504877870329
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3838234229198108
  Min value:  -5.352418327671095
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 153s 332ms/step - loss: 0.6811 - val_loss: 1.2708 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 21s 247ms/step - loss: 0.4777 - val_loss: 1.4143 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 22s 254ms/step - loss: 0.4549 - val_loss: 1.1574 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 21s 247ms/step - loss: 0.4427 - val_loss: 1.0217 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 24s 275ms/step - loss: 0.4386 - val_loss: 0.9786 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 23s 266ms/step - loss: 0.4343 - val_loss: 1.0142 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 22s 256ms/step - loss: 0.4306 - val_loss: 0.9575 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 29s 334ms/step - loss: 0.4311 - val_loss: 0.9709 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 27s 319ms/step - loss: 0.4260 - val_loss: 0.9669 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 29s 340ms/step - loss: 0.4238 - val_loss: 0.9894 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 26s 303ms/step - loss: 0.4220 - val_loss: 0.9850 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 0s 288ms/step - loss: 0.4234
Epoch 12: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
86/86 ━━━━━━━━━━━━━━━━━━━━ 25s 295ms/step - loss: 0.4218 - val_loss: 0.9781 - learning_rate: 5.0000e-04
Epoch 12: early stopping
Restoring model weights from the end of the best epoch: 7.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11508104
1 day(s) RMSE                      : 0.21192773
1 day(s) R2                        : 0.01599360
1 day(s) Pearson r                 : 0.42510042
1 day(s) QLIKE                     : 0.48691850
3 day(s) MAE                       : 0.11795942
3 day(s) RMSE                      : 0.21378389
3 day(s) R2                        : -0.00904358
3 day(s) Pearson r                 : 0.38456948
3 day(s) QLIKE                     : 0.51411824
5 day(s) MAE                       : 0.11939818
5 day(s) RMSE                      : 0.21495711
5 day(s) R2                        : -0.03174146
5 day(s) Pearson r                 : 0.36901550
5 day(s) QLIKE                     : 0.52225088
full horizon MAE                   : 0.11939818
full horizon RMSE                  : 0.21495711
full horizon R2                    : -0.03174146
full horizon Pearson r             : 0.36901550
full horizon QLIKE                 : 0.52225088

--- Task 2 ---
1 day(s) MAE                       : 0.03459759
1 day(s) RMSE                      : 0.05621061
1 day(s) R2                        : -0.60396187
1 day(s) Pearson r                 : -0.14581363
1 day(s) QLIKE                     : 11.89216410
3 day(s) MAE                       : 0.03456889
3 day(s) RMSE                      : 0.05622003
3 day(s) R2                        : -0.60409403
3 day(s) Pearson r                 : -0.13038951
3 day(s) QLIKE                     : 13.41048951
5 day(s) MAE                       : 0.03451723
5 day(s) RMSE                      : 0.05621673
5 day(s) R2                        : -0.60282775
5 day(s) Pearson r                 : -0.10023806
5 day(s) QLIKE                     : 12.53446312
full horizon MAE                   : 0.03451723
full horizon RMSE                  : 0.05621673
full horizon R2                    : -0.60282775
full horizon Pearson r             : -0.10023806
full horizon QLIKE                 : 12.53446312

--- Task 3 ---
1 day(s) MAE                       : 0.71596395
1 day(s) RMSE                      : 0.79686857
1 day(s) R2                        : -3.70114834
1 day(s) Pearson r                 : 0.17674577
1 day(s) QLIKE                     : 3.50647304
3 day(s) MAE                       : 0.74802032
3 day(s) RMSE                      : 0.83101568
3 day(s) R2                        : -4.11214846
3 day(s) Pearson r                 : 0.09622442
3 day(s) QLIKE                     : 8.22359709
5 day(s) MAE                       : 0.75510457
5 day(s) RMSE                      : 0.83842450
5 day(s) R2                        : -4.20251862
5 day(s) Pearson r                 : 0.07220476
5 day(s) QLIKE                     : 6.69851627
full horizon MAE                   : 0.75510457
full horizon RMSE                  : 0.83842450
full horizon R2                    : -4.20251862
full horizon Pearson r             : 0.07220476
full horizon QLIKE                 : 6.69851627

--- Task 4 ---
1 day(s) MAE                       : 0.03282058
1 day(s) RMSE                      : 0.06196982
1 day(s) R2                        : -0.37456129
1 day(s) Pearson r                 : -0.18103498
1 day(s) QLIKE                     : 5.00405993
3 day(s) MAE                       : 0.03322944
3 day(s) RMSE                      : 0.06189551
3 day(s) R2                        : -0.37106014
3 day(s) Pearson r                 : -0.15375524
3 day(s) QLIKE                     : 10.38550411
5 day(s) MAE                       : 0.03364608
5 day(s) RMSE                      : 0.06193931
5 day(s) R2                        : -0.37262899
5 day(s) Pearson r                 : -0.16822734
5 day(s) QLIKE                     : 10.45188847
full horizon MAE                   : 0.03364608
full horizon RMSE                  : 0.06193931
full horizon R2                    : -0.37262899
full horizon Pearson r             : -0.16822734
full horizon QLIKE                 : 10.45188847

--- Task 5 ---
1 day(s) MAE                       : 0.00512700
1 day(s) RMSE                      : 0.00780876
1 day(s) R2                        : -0.75727152
1 day(s) Pearson r                 : 0.19801251
1 day(s) QLIKE                     : 12.74594658
3 day(s) MAE                       : 0.00512743
3 day(s) RMSE                      : 0.00780943
3 day(s) R2                        : -0.75757254
3 day(s) Pearson r                 : 0.09409271
3 day(s) QLIKE                     : 13.27851958
5 day(s) MAE                       : 0.00512752
5 day(s) RMSE                      : 0.00780957
5 day(s) R2                        : -0.75763312
5 day(s) Pearson r                 : 0.07057009
5 day(s) QLIKE                     : 11.96404282
full horizon MAE                   : 0.00512752
full horizon RMSE                  : 0.00780957
full horizon R2                    : -0.75763312
full horizon Pearson r             : 0.07057009
full horizon QLIKE                 : 11.96404282

--- Task 6 ---
1 day(s) MAE                       : 1.44011873
1 day(s) RMSE                      : 1.75067457
1 day(s) R2                        : -1.00578096
1 day(s) Pearson r                 : 0.15147746
1 day(s) QLIKE                     : 0.03812034
3 day(s) MAE                       : 1.47609494
3 day(s) RMSE                      : 1.79712102
3 day(s) R2                        : -1.11287623
3 day(s) Pearson r                 : 0.17045998
3 day(s) QLIKE                     : 0.03791437
5 day(s) MAE                       : 1.49292318
5 day(s) RMSE                      : 1.81972234
5 day(s) R2                        : -1.16520789
5 day(s) Pearson r                 : 0.20048136
5 day(s) QLIKE                     : 0.03667145
full horizon MAE                   : 1.49292318
full horizon RMSE                  : 1.81972234
full horizon R2                    : -1.16520789
full horizon Pearson r             : 0.20048136
full horizon QLIKE                 : 0.03667145

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_LSTM_H5.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0458121, max=0.246393

=== EURUSD | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.7476503800701995
  Min value:  -3.719996871136402
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3197553444271937
  Min value:  -2.9770657245458123
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3841503070956698
  Min value:  -5.351929442359245
Epoch 001 | phase=1 | train_loss=4.9952 | val_main=1.209539
Epoch 002 | phase=1 | train_loss=4.6494 | val_main=1.211823
Epoch 003 | phase=1 | train_loss=4.4827 | val_main=1.212710
Epoch 004 | phase=1 | train_loss=4.3714 | val_main=1.212207
Epoch 005 | phase=1 | train_loss=4.2574 | val_main=1.212573
Epoch 006 | phase=1 | train_loss=4.1090 | val_main=1.212730
Epoch 007 | phase=1 | train_loss=3.9448 | val_main=1.213538
Epoch 008 | phase=1 | train_loss=3.8639 | val_main=1.213764
Epoch 009 | phase=1 | train_loss=3.6649 | val_main=1.214872
Epoch 010 | phase=1 | train_loss=3.5370 | val_main=1.214828
Epoch 011 | phase=1 | train_loss=3.4194 | val_main=1.214975
Epoch 012 | phase=1 | train_loss=3.2967 | val_main=1.214589
Epoch 013 | phase=1 | train_loss=3.1062 | val_main=1.214220
Epoch 014 | phase=1 | train_loss=3.0157 | val_main=1.213969
Epoch 015 | phase=1 | train_loss=2.9420 | val_main=1.214238
Epoch 016 | phase=0 | train_loss=3.7315 | val_main=0.999687
Epoch 017 | phase=0 | train_loss=3.5233 | val_main=0.912624
Epoch 018 | phase=0 | train_loss=3.3560 | val_main=0.839868
Epoch 019 | phase=0 | train_loss=3.2215 | val_main=0.740966
Epoch 020 | phase=0 | train_loss=3.0533 | val_main=0.689957
Epoch 021 | phase=0 | train_loss=2.8201 | val_main=0.602356
Epoch 022 | phase=0 | train_loss=2.6882 | val_main=0.522273
Epoch 023 | phase=0 | train_loss=2.5845 | val_main=0.446492
Epoch 024 | phase=0 | train_loss=2.3863 | val_main=0.423190
Epoch 025 | phase=0 | train_loss=2.2812 | val_main=0.406252
Epoch 026 | phase=0 | train_loss=2.2473 | val_main=0.367131
Epoch 027 | phase=0 | train_loss=2.1591 | val_main=0.356897
Epoch 028 | phase=0 | train_loss=2.1164 | val_main=0.334568
Epoch 029 | phase=0 | train_loss=2.0340 | val_main=0.352075
Epoch 030 | phase=0 | train_loss=1.9688 | val_main=0.346131
Epoch 031 | phase=2 | train_loss=0.2291 | val_main=0.341468
Epoch 032 | phase=2 | train_loss=0.2260 | val_main=0.334809
Epoch 033 | phase=2 | train_loss=0.2171 | val_main=0.332946
Epoch 034 | phase=2 | train_loss=0.2158 | val_main=0.329222
Epoch 035 | phase=2 | train_loss=0.2088 | val_main=0.328985
Epoch 036 | phase=2 | train_loss=0.2024 | val_main=0.340944
Epoch 037 | phase=2 | train_loss=0.1993 | val_main=0.333794
Epoch 038 | phase=2 | train_loss=0.1951 | val_main=0.332970
Epoch 039 | phase=2 | train_loss=0.1910 | val_main=0.346763
Epoch 040 | phase=2 | train_loss=0.1870 | val_main=0.352996
Epoch 041 | phase=2 | train_loss=0.1838 | val_main=0.352100
Epoch 042 | phase=2 | train_loss=0.1801 | val_main=0.349358
Epoch 043 | phase=2 | train_loss=0.1766 | val_main=0.349907
Epoch 044 | phase=2 | train_loss=0.1762 | val_main=0.346840
Epoch 045 | phase=2 | train_loss=0.1734 | val_main=0.364801
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.08955657
1 day(s) RMSE                      : 0.17383639
1 day(s) R2                        : 0.33793029
1 day(s) Pearson r                 : 0.58635538
1 day(s) QLIKE                     : 0.37756615
3 day(s) MAE                       : 0.09360218
3 day(s) RMSE                      : 0.18035779
3 day(s) R2                        : 0.28182583
3 day(s) Pearson r                 : 0.54199454
3 day(s) QLIKE                     : 0.39069723
5 day(s) MAE                       : 0.09585852
5 day(s) RMSE                      : 0.18485370
5 day(s) R2                        : 0.23700173
5 day(s) Pearson r                 : 0.51125463
5 day(s) QLIKE                     : 0.41188194
10 day(s) MAE                      : 0.09639125
10 day(s) RMSE                     : 0.18854423
10 day(s) R2                       : 0.19865468
10 day(s) Pearson r                : 0.47733436
10 day(s) QLIKE                    : 0.42608910
full horizon MAE                   : 0.09639125
full horizon RMSE                  : 0.18854423
full horizon R2                    : 0.19865468
full horizon Pearson r             : 0.47733436
full horizon QLIKE                 : 0.42608910

--- Task 2 ---
1 day(s) MAE                       : 0.03394429
1 day(s) RMSE                      : 0.05454115
1 day(s) R2                        : -0.51010099
1 day(s) Pearson r                 : -0.01460582
1 day(s) QLIKE                     : 19.30600768
3 day(s) MAE                       : 0.03411893
3 day(s) RMSE                      : 0.05484849
3 day(s) R2                        : -0.52678218
3 day(s) Pearson r                 : -0.02656502
3 day(s) QLIKE                     : 19.36618016
5 day(s) MAE                       : 0.03444678
5 day(s) RMSE                      : 0.05529804
5 day(s) R2                        : -0.55086923
5 day(s) Pearson r                 : -0.02905630
5 day(s) QLIKE                     : 19.44860174
10 day(s) MAE                      : 0.03393860
10 day(s) RMSE                     : 0.05506994
10 day(s) R2                       : -0.53600957
10 day(s) Pearson r                : -0.01521919
10 day(s) QLIKE                    : 19.55704515
full horizon MAE                   : 0.03393860
full horizon RMSE                  : 0.05506994
full horizon R2                    : -0.53600957
full horizon Pearson r             : -0.01521919
full horizon QLIKE                 : 19.55704515

--- Task 3 ---
1 day(s) MAE                       : 0.59723010
1 day(s) RMSE                      : 0.74930427
1 day(s) R2                        : -3.15668363
1 day(s) Pearson r                 : -0.14242218
1 day(s) QLIKE                     : 9.45261607
3 day(s) MAE                       : 0.58995913
3 day(s) RMSE                      : 0.73675230
3 day(s) R2                        : -3.01816740
3 day(s) Pearson r                 : -0.13230451
3 day(s) QLIKE                     : 9.74444909
5 day(s) MAE                       : 0.58182841
5 day(s) RMSE                      : 0.72783623
5 day(s) R2                        : -2.92060482
5 day(s) Pearson r                 : -0.12797682
5 day(s) QLIKE                     : 9.97871097
10 day(s) MAE                      : 0.56780829
10 day(s) RMSE                     : 0.71329830
10 day(s) R2                       : -2.76360800
10 day(s) Pearson r                : -0.07154204
10 day(s) QLIKE                    : 10.28755824
full horizon MAE                   : 0.56780829
full horizon RMSE                  : 0.71329830
full horizon R2                    : -2.76360800
full horizon Pearson r             : -0.07154204
full horizon QLIKE                 : 10.28755824

--- Task 4 ---
1 day(s) MAE                       : 0.03629914
1 day(s) RMSE                      : 0.06115116
1 day(s) R2                        : -0.33848322
1 day(s) Pearson r                 : -0.18496252
1 day(s) QLIKE                     : 10.92590783
3 day(s) MAE                       : 0.03660736
3 day(s) RMSE                      : 0.06117379
3 day(s) R2                        : -0.33927246
3 day(s) Pearson r                 : -0.17890431
3 day(s) QLIKE                     : 11.22085450
5 day(s) MAE                       : 0.03694254
5 day(s) RMSE                      : 0.06121161
5 day(s) R2                        : -0.34056563
5 day(s) Pearson r                 : -0.16811281
5 day(s) QLIKE                     : 11.50538090
10 day(s) MAE                      : 0.03688577
10 day(s) RMSE                     : 0.06097283
10 day(s) R2                       : -0.32929283
10 day(s) Pearson r                : -0.15331383
10 day(s) QLIKE                    : 11.72243816
full horizon MAE                   : 0.03688577
full horizon RMSE                  : 0.06097283
full horizon R2                    : -0.32929283
full horizon Pearson r             : -0.15331383
full horizon QLIKE                 : 11.72243816

--- Task 5 ---
1 day(s) MAE                       : 0.00861942
1 day(s) RMSE                      : 0.01378903
1 day(s) R2                        : -4.47951323
1 day(s) Pearson r                 : 0.20326192
1 day(s) QLIKE                     : 16.52114714
3 day(s) MAE                       : 0.00869276
3 day(s) RMSE                      : 0.01386257
3 day(s) R2                        : -4.53811347
3 day(s) Pearson r                 : 0.18441732
3 day(s) QLIKE                     : 16.54608379
5 day(s) MAE                       : 0.00877773
5 day(s) RMSE                      : 0.01397833
5 day(s) R2                        : -4.63099008
5 day(s) Pearson r                 : 0.17448125
5 day(s) QLIKE                     : 16.55070608
10 day(s) MAE                      : 0.00857087
10 day(s) RMSE                     : 0.01358219
10 day(s) R2                       : -4.31635368
10 day(s) Pearson r                : 0.15326227
10 day(s) QLIKE                    : 16.57281044
full horizon MAE                   : 0.00857087
full horizon RMSE                  : 0.01358219
full horizon R2                    : -4.31635368
full horizon Pearson r             : 0.15326227
full horizon QLIKE                 : 16.57281044

--- Task 6 ---
1 day(s) MAE                       : 1.15106844
1 day(s) RMSE                      : 1.61606181
1 day(s) R2                        : -0.70918313
1 day(s) Pearson r                 : 0.26678696
1 day(s) QLIKE                     : 0.03345758
3 day(s) MAE                       : 1.14379775
3 day(s) RMSE                      : 1.60544506
3 day(s) R2                        : -0.68620475
3 day(s) Pearson r                 : 0.26502661
3 day(s) QLIKE                     : 0.03325400
5 day(s) MAE                       : 1.13771921
5 day(s) RMSE                      : 1.60151687
5 day(s) R2                        : -0.67707462
5 day(s) Pearson r                 : 0.26830258
5 day(s) QLIKE                     : 0.03301023
10 day(s) MAE                      : 1.12100765
10 day(s) RMSE                     : 1.60640157
10 day(s) R2                       : -0.68373863
10 day(s) Pearson r                : 0.28260034
10 day(s) QLIKE                    : 0.03234772
full horizon MAE                   : 1.12100765
full horizon RMSE                  : 1.60640157
full horizon R2                    : -0.68373863
full horizon Pearson r             : 0.28260034
full horizon QLIKE                 : 0.03234772

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_H10.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00297112, max=1.32878

=== EURUSD | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.7476503800701995
  Min value:  -3.719996871136402
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3197553444271937
  Min value:  -2.9770657245458123
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3841503070956698
  Min value:  -5.351929442359245
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 174s 395ms/step - loss: 0.6320 - val_loss: 1.2921 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 28s 322ms/step - loss: 0.4132 - val_loss: 1.4350 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 28s 325ms/step - loss: 0.3891 - val_loss: 1.3263 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 28s 321ms/step - loss: 0.3775 - val_loss: 1.0932 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 28s 327ms/step - loss: 0.3682 - val_loss: 1.2110 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 28s 326ms/step - loss: 0.3616 - val_loss: 1.0926 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 28s 324ms/step - loss: 0.3587 - val_loss: 0.9953 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 30s 345ms/step - loss: 0.3543 - val_loss: 1.0238 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 34s 395ms/step - loss: 0.3464 - val_loss: 1.0783 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 32s 370ms/step - loss: 0.3421 - val_loss: 1.0344 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 30s 346ms/step - loss: 0.3378 - val_loss: 1.0170 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 0s 370ms/step - loss: 0.3362
Epoch 12: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
86/86 ━━━━━━━━━━━━━━━━━━━━ 33s 385ms/step - loss: 0.3346 - val_loss: 1.1068 - learning_rate: 5.0000e-04
Epoch 12: early stopping
Restoring model weights from the end of the best epoch: 7.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.11025227
1 day(s) RMSE                      : 0.20677671
1 day(s) R2                        : 0.06324592
1 day(s) Pearson r                 : 0.41293901
1 day(s) QLIKE                     : 0.48962977
3 day(s) MAE                       : 0.11721950
3 day(s) RMSE                      : 0.21552583
3 day(s) R2                        : -0.02555420
3 day(s) Pearson r                 : 0.34708090
3 day(s) QLIKE                     : 0.51972154
5 day(s) MAE                       : 0.11756639
5 day(s) RMSE                      : 0.21659161
5 day(s) R2                        : -0.04749152
5 day(s) Pearson r                 : 0.33023322
5 day(s) QLIKE                     : 0.51862278
10 day(s) MAE                      : 0.11649309
10 day(s) RMSE                     : 0.21569807
10 day(s) R2                       : -0.04878320
10 day(s) Pearson r                : 0.30813985
10 day(s) QLIKE                    : 0.51944667
full horizon MAE                   : 0.11649309
full horizon RMSE                  : 0.21569807
full horizon R2                    : -0.04878320
full horizon Pearson r             : 0.30813985
full horizon QLIKE                 : 0.51944667

--- Task 2 ---
1 day(s) MAE                       : 0.03449850
1 day(s) RMSE                      : 0.05606925
1 day(s) R2                        : -0.59590433
1 day(s) Pearson r                 : -0.07019128
1 day(s) QLIKE                     : 13.17873974
3 day(s) MAE                       : 0.03453417
3 day(s) RMSE                      : 0.05611866
3 day(s) R2                        : -0.59831461
3 day(s) Pearson r                 : -0.09682873
3 day(s) QLIKE                     : 15.72863829
5 day(s) MAE                       : 0.03449638
5 day(s) RMSE                      : 0.05615588
5 day(s) R2                        : -0.59935974
5 day(s) Pearson r                 : -0.07249472
5 day(s) QLIKE                     : 14.41360917
10 day(s) MAE                      : 0.03441843
10 day(s) RMSE                     : 0.05616861
10 day(s) R2                       : -0.59790943
10 day(s) Pearson r                : -0.04962249
10 day(s) QLIKE                    : 12.74831435
full horizon MAE                   : 0.03441843
full horizon RMSE                  : 0.05616861
full horizon R2                    : -0.59790943
full horizon Pearson r             : -0.04962249
full horizon QLIKE                 : 12.74831435

--- Task 3 ---
1 day(s) MAE                       : 0.70544837
1 day(s) RMSE                      : 0.78466746
1 day(s) R2                        : -3.55828888
1 day(s) Pearson r                 : 0.19093147
1 day(s) QLIKE                     : 3.49976836
3 day(s) MAE                       : 0.74327997
3 day(s) RMSE                      : 0.82591043
3 day(s) R2                        : -4.04952966
3 day(s) Pearson r                 : 0.09534002
3 day(s) QLIKE                     : 7.55411821
5 day(s) MAE                       : 0.75226022
5 day(s) RMSE                      : 0.83539211
5 day(s) R2                        : -4.16495408
5 day(s) Pearson r                 : 0.06977842
5 day(s) QLIKE                     : 7.02763711
10 day(s) MAE                      : 0.75911236
10 day(s) RMSE                     : 0.84256724
10 day(s) R2                       : -4.25135214
10 day(s) Pearson r                : 0.04736674
10 day(s) QLIKE                    : 5.41821106
full horizon MAE                   : 0.75911236
full horizon RMSE                  : 0.84256724
full horizon R2                    : -4.25135214
full horizon Pearson r             : 0.04736674
full horizon QLIKE                 : 5.41821106

--- Task 4 ---
1 day(s) MAE                       : 0.03257742
1 day(s) RMSE                      : 0.06169103
1 day(s) R2                        : -0.36222126
1 day(s) Pearson r                 : -0.15440293
1 day(s) QLIKE                     : 4.50611378
3 day(s) MAE                       : 0.03234016
3 day(s) RMSE                      : 0.06115330
3 day(s) R2                        : -0.33837581
3 day(s) Pearson r                 : -0.12468555
3 day(s) QLIKE                     : 8.08213454
5 day(s) MAE                       : 0.03297594
5 day(s) RMSE                      : 0.06057024
5 day(s) R2                        : -0.31261983
5 day(s) Pearson r                 : -0.10899419
5 day(s) QLIKE                     : 11.05301469
10 day(s) MAE                      : 0.03508875
10 day(s) RMSE                     : 0.06006666
10 day(s) R2                       : -0.29007477
10 day(s) Pearson r                : -0.12380094
10 day(s) QLIKE                    : 11.52329300
full horizon MAE                   : 0.03508875
full horizon RMSE                  : 0.06006666
full horizon R2                    : -0.29007477
full horizon Pearson r             : -0.12380094
full horizon QLIKE                 : 11.52329300

--- Task 5 ---
1 day(s) MAE                       : 0.00512736
1 day(s) RMSE                      : 0.00780933
1 day(s) R2                        : -0.75752512
1 day(s) Pearson r                 : 0.41337337
1 day(s) QLIKE                     : 12.75232346
3 day(s) MAE                       : 0.00512755
3 day(s) RMSE                      : 0.00780962
3 day(s) R2                        : -0.75765564
3 day(s) Pearson r                 : 0.19518972
3 day(s) QLIKE                     : 12.68379260
5 day(s) MAE                       : 0.00512759
5 day(s) RMSE                      : 0.00780968
5 day(s) R2                        : -0.75768297
5 day(s) Pearson r                 : 0.14538551
5 day(s) QLIKE                     : 11.29248877
10 day(s) MAE                      : 0.00512762
10 day(s) RMSE                     : 0.00780972
10 day(s) R2                       : -0.75770347
10 day(s) Pearson r                : 0.10001538
10 day(s) QLIKE                    : 10.24160523
full horizon MAE                   : 0.00512762
full horizon RMSE                  : 0.00780972
full horizon R2                    : -0.75770347
full horizon Pearson r             : 0.10001538
full horizon QLIKE                 : 10.24160523

--- Task 6 ---
1 day(s) MAE                       : 1.35201625
1 day(s) RMSE                      : 1.64100959
1 day(s) R2                        : -0.76236110
1 day(s) Pearson r                 : -0.23349714
1 day(s) QLIKE                     : 0.04277081
3 day(s) MAE                       : 1.37090468
3 day(s) RMSE                      : 1.66662597
3 day(s) R2                        : -0.81717061
3 day(s) Pearson r                 : -0.25213048
3 day(s) QLIKE                     : 0.04141192
5 day(s) MAE                       : 1.36601672
5 day(s) RMSE                      : 1.66207018
5 day(s) R2                        : -0.80629243
5 day(s) Pearson r                 : -0.25393858
5 day(s) QLIKE                     : 0.03977294
10 day(s) MAE                      : 1.39220386
10 day(s) RMSE                     : 1.68251969
10 day(s) R2                       : -0.84708442
10 day(s) Pearson r                : -0.28364294
10 day(s) QLIKE                    : 0.03791710
full horizon MAE                   : 1.39220386
full horizon RMSE                  : 1.68251969
full horizon R2                    : -0.84708442
full horizon Pearson r             : -0.28364294
full horizon QLIKE                 : 0.03791710

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_LSTM_H10.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0447775, max=0.226415

=== EURUSD | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3782
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.7474473273136155
  Min value:  -3.7200073694899167
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.321220211077452
  Min value:  -2.97689474283611
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737455
  Min value:  -5.35233858226595
Epoch 001 | phase=1 | train_loss=4.9931 | val_main=1.228354
Epoch 002 | phase=1 | train_loss=4.6899 | val_main=1.229844
Epoch 003 | phase=1 | train_loss=4.5086 | val_main=1.230018
Epoch 004 | phase=1 | train_loss=4.3678 | val_main=1.229933
Epoch 005 | phase=1 | train_loss=4.2063 | val_main=1.230155
Epoch 006 | phase=1 | train_loss=4.0066 | val_main=1.230126
Epoch 007 | phase=1 | train_loss=3.8909 | val_main=1.230043
Epoch 008 | phase=1 | train_loss=3.8188 | val_main=1.229935
Epoch 009 | phase=1 | train_loss=3.6176 | val_main=1.230166
Epoch 010 | phase=1 | train_loss=3.4724 | val_main=1.230071
Epoch 011 | phase=1 | train_loss=3.3855 | val_main=1.230101
Epoch 012 | phase=1 | train_loss=3.2812 | val_main=1.230185
Epoch 013 | phase=1 | train_loss=3.0935 | val_main=1.230290
Epoch 014 | phase=1 | train_loss=3.0006 | val_main=1.230316
Epoch 015 | phase=1 | train_loss=2.9403 | val_main=1.230466
Epoch 016 | phase=0 | train_loss=3.7907 | val_main=1.057823
Epoch 017 | phase=0 | train_loss=3.6006 | val_main=0.956255
Epoch 018 | phase=0 | train_loss=3.4884 | val_main=0.911961
Epoch 019 | phase=0 | train_loss=3.3676 | val_main=0.885423
Epoch 020 | phase=0 | train_loss=3.2470 | val_main=0.780974
Epoch 021 | phase=0 | train_loss=3.0758 | val_main=0.702361
Epoch 022 | phase=0 | train_loss=2.9281 | val_main=0.630526
Epoch 023 | phase=0 | train_loss=2.7464 | val_main=0.537428
Epoch 024 | phase=0 | train_loss=2.6079 | val_main=0.497743
Epoch 025 | phase=0 | train_loss=2.4630 | val_main=0.443168
Epoch 026 | phase=0 | train_loss=2.3670 | val_main=0.395190
Epoch 027 | phase=0 | train_loss=2.2428 | val_main=0.382617
Epoch 028 | phase=0 | train_loss=2.1780 | val_main=0.364068
Epoch 029 | phase=0 | train_loss=2.1193 | val_main=0.364457
Epoch 030 | phase=0 | train_loss=2.0573 | val_main=0.354162
Epoch 031 | phase=2 | train_loss=0.2527 | val_main=0.352728
Epoch 032 | phase=2 | train_loss=0.2467 | val_main=0.354946
Epoch 033 | phase=2 | train_loss=0.2404 | val_main=0.362079
Epoch 034 | phase=2 | train_loss=0.2416 | val_main=0.349136
Epoch 035 | phase=2 | train_loss=0.2310 | val_main=0.353860
Epoch 036 | phase=2 | train_loss=0.2262 | val_main=0.362826
Epoch 037 | phase=2 | train_loss=0.2233 | val_main=0.363486
Epoch 038 | phase=2 | train_loss=0.2187 | val_main=0.358472
Epoch 039 | phase=2 | train_loss=0.2169 | val_main=0.371263
Epoch 040 | phase=2 | train_loss=0.2124 | val_main=0.375643
Epoch 041 | phase=2 | train_loss=0.2090 | val_main=0.388353
Epoch 042 | phase=2 | train_loss=0.2075 | val_main=0.379832
Epoch 043 | phase=2 | train_loss=0.2035 | val_main=0.387429
Epoch 044 | phase=2 | train_loss=0.2036 | val_main=0.374385
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.09009713
1 day(s) RMSE                      : 0.17642242
1 day(s) R2                        : 0.31808558
1 day(s) Pearson r                 : 0.57007083
1 day(s) QLIKE                     : 0.38172591
3 day(s) MAE                       : 0.09477819
3 day(s) RMSE                      : 0.18557990
3 day(s) R2                        : 0.23963547
3 day(s) Pearson r                 : 0.50872398
3 day(s) QLIKE                     : 0.41914086
5 day(s) MAE                       : 0.09593604
5 day(s) RMSE                      : 0.18749946
5 day(s) R2                        : 0.21500427
5 day(s) Pearson r                 : 0.48838586
5 day(s) QLIKE                     : 0.42797432
10 day(s) MAE                      : 0.09721305
10 day(s) RMSE                     : 0.19034777
10 day(s) R2                       : 0.18325066
10 day(s) Pearson r                : 0.46195015
10 day(s) QLIKE                    : 0.43972254
20 day(s) MAE                      : 0.09967460
20 day(s) RMSE                     : 0.19377007
20 day(s) R2                       : 0.14363532
20 day(s) Pearson r                : 0.42455563
20 day(s) QLIKE                    : 0.47449161
full horizon MAE                   : 0.09967460
full horizon RMSE                  : 0.19377007
full horizon R2                    : 0.14363532
full horizon Pearson r             : 0.42455563
full horizon QLIKE                 : 0.47449161

--- Task 2 ---
1 day(s) MAE                       : 0.03838463
1 day(s) RMSE                      : 0.06354503
1 day(s) R2                        : -1.04984298
1 day(s) Pearson r                 : -0.01343853
1 day(s) QLIKE                     : 18.72059700
3 day(s) MAE                       : 0.03827613
3 day(s) RMSE                      : 0.06337762
3 day(s) R2                        : -1.03854163
3 day(s) Pearson r                 : -0.00111704
3 day(s) QLIKE                     : 18.76380126
5 day(s) MAE                       : 0.03803094
5 day(s) RMSE                      : 0.06194521
5 day(s) R2                        : -0.94612677
5 day(s) Pearson r                 : -0.00379936
5 day(s) QLIKE                     : 18.95615537
10 day(s) MAE                      : 0.03738371
10 day(s) RMSE                     : 0.05976137
10 day(s) R2                       : -0.80886365
10 day(s) Pearson r                : -0.03235211
10 day(s) QLIKE                    : 19.20117834
20 day(s) MAE                      : 0.03708690
20 day(s) RMSE                     : 0.05836105
20 day(s) R2                       : -0.72391235
20 day(s) Pearson r                : -0.04655799
20 day(s) QLIKE                    : 19.41749632
full horizon MAE                   : 0.03708690
full horizon RMSE                  : 0.05836105
full horizon R2                    : -0.72391235
full horizon Pearson r             : -0.04655799
full horizon QLIKE                 : 19.41749632

--- Task 3 ---
1 day(s) MAE                       : 0.60460476
1 day(s) RMSE                      : 0.73249674
1 day(s) R2                        : -2.97229915
1 day(s) Pearson r                 : -0.10161839
1 day(s) QLIKE                     : 7.21530196
3 day(s) MAE                       : 0.60106441
3 day(s) RMSE                      : 0.72755429
3 day(s) R2                        : -2.91846380
3 day(s) Pearson r                 : -0.10015558
3 day(s) QLIKE                     : 7.33966660
5 day(s) MAE                       : 0.59751681
5 day(s) RMSE                      : 0.72240256
5 day(s) R2                        : -2.86228471
5 day(s) Pearson r                 : -0.09401293
5 day(s) QLIKE                     : 7.47335177
10 day(s) MAE                      : 0.59498650
10 day(s) RMSE                     : 0.72714359
10 day(s) R2                       : -2.91113104
10 day(s) Pearson r                : -0.08383971
10 day(s) QLIKE                    : 7.88469071
20 day(s) MAE                      : 0.57291118
20 day(s) RMSE                     : 0.70541741
20 day(s) R2                       : -2.67942495
20 day(s) Pearson r                : -0.02135542
20 day(s) QLIKE                    : 8.05336010
full horizon MAE                   : 0.57291118
full horizon RMSE                  : 0.70541741
full horizon R2                    : -2.67942495
full horizon Pearson r             : -0.02135542
full horizon QLIKE                 : 8.05336010

--- Task 4 ---
1 day(s) MAE                       : 0.03502900
1 day(s) RMSE                      : 0.05994632
1 day(s) R2                        : -0.28625949
1 day(s) Pearson r                 : -0.10330504
1 day(s) QLIKE                     : 12.29250276
3 day(s) MAE                       : 0.03514593
3 day(s) RMSE                      : 0.06004382
3 day(s) R2                        : -0.29025275
3 day(s) Pearson r                 : -0.10922068
3 day(s) QLIKE                     : 12.55805237
5 day(s) MAE                       : 0.03536073
5 day(s) RMSE                      : 0.06013261
5 day(s) R2                        : -0.29372066
5 day(s) Pearson r                 : -0.11732994
5 day(s) QLIKE                     : 12.88158277
10 day(s) MAE                      : 0.03573982
10 day(s) RMSE                     : 0.06031226
10 day(s) R2                       : -0.30064617
10 day(s) Pearson r                : -0.12474296
10 day(s) QLIKE                    : 13.34603132
20 day(s) MAE                      : 0.03630392
20 day(s) RMSE                     : 0.06042370
20 day(s) R2                       : -0.30465091
20 day(s) Pearson r                : -0.11197950
20 day(s) QLIKE                    : 13.28275125
full horizon MAE                   : 0.03630392
full horizon RMSE                  : 0.06042370
full horizon R2                    : -0.30465091
full horizon Pearson r             : -0.11197950
full horizon QLIKE                 : 13.28275125

--- Task 5 ---
1 day(s) MAE                       : 0.00671275
1 day(s) RMSE                      : 0.01084310
1 day(s) R2                        : -2.38829357
1 day(s) Pearson r                 : 0.20225983
1 day(s) QLIKE                     : 16.60175330
3 day(s) MAE                       : 0.00703007
3 day(s) RMSE                      : 0.01152159
3 day(s) R2                        : -2.82559855
3 day(s) Pearson r                 : 0.18999992
3 day(s) QLIKE                     : 16.65648222
5 day(s) MAE                       : 0.00723832
5 day(s) RMSE                      : 0.01176297
5 day(s) R2                        : -2.98757260
5 day(s) Pearson r                 : 0.18685414
5 day(s) QLIKE                     : 16.70222855
10 day(s) MAE                      : 0.00720208
10 day(s) RMSE                     : 0.01150401
10 day(s) R2                       : -2.81393416
10 day(s) Pearson r                : 0.17248243
10 day(s) QLIKE                    : 16.74344322
20 day(s) MAE                      : 0.00716413
20 day(s) RMSE                     : 0.01136058
20 day(s) R2                       : -2.72382232
20 day(s) Pearson r                : 0.13274517
20 day(s) QLIKE                    : 16.62703415
full horizon MAE                   : 0.00716413
full horizon RMSE                  : 0.01136058
full horizon R2                    : -2.72382232
full horizon Pearson r             : 0.13274517
full horizon QLIKE                 : 16.62703415

--- Task 6 ---
1 day(s) MAE                       : 1.23455848
1 day(s) RMSE                      : 2.19251113
1 day(s) R2                        : -2.14598229
1 day(s) Pearson r                 : 0.19084434
1 day(s) QLIKE                     : 0.03734005
3 day(s) MAE                       : 1.23855078
3 day(s) RMSE                      : 2.20149234
3 day(s) R2                        : -2.17068991
3 day(s) Pearson r                 : 0.18349393
3 day(s) QLIKE                     : 0.03796776
5 day(s) MAE                       : 1.22943624
5 day(s) RMSE                      : 2.18842316
5 day(s) R2                        : -2.13149667
5 day(s) Pearson r                 : 0.19012883
5 day(s) QLIKE                     : 0.03746893
10 day(s) MAE                      : 1.22112602
10 day(s) RMSE                     : 2.19532118
10 day(s) R2                       : -2.14457943
10 day(s) Pearson r                : 0.22438347
10 day(s) QLIKE                    : 0.03681021
20 day(s) MAE                      : 1.20508909
20 day(s) RMSE                     : 2.27450790
20 day(s) R2                       : -2.37516445
20 day(s) Pearson r                : 0.30071316
20 day(s) QLIKE                    : 0.03498644
full horizon MAE                   : 1.20508909
full horizon RMSE                  : 2.27450790
full horizon R2                    : -2.37516445
full horizon Pearson r             : 0.30071316
full horizon QLIKE                 : 0.03498644

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_H20.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.00325541, max=1.00685

=== EURUSD | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3782
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2723, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  19.199765452525025
  Min value:  -0.7148707238463011
Checking X_time_train_core:
Shape: (2723, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.7683468970942753
Checking y_train_core (log_mse scaled):
Shape: (2723, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.7474473273136155
  Min value:  -3.7200073694899167
Checking X_price_val:
Shape: (302, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  7.408921165171427
  Min value:  -0.7103978753601863
Checking X_time_val:
Shape: (302, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_val (log_mse scaled):
Shape: (302, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.321220211077452
  Min value:  -2.97689474283611
Checking X_price_test:
Shape: (757, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  8.131631151144182
  Min value:  -0.7173043382357188
Checking X_time_test:
Shape: (757, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.474527252926688
  Min value:  -1.4974014475059652
Checking y_test (log_mse scaled):
Shape: (757, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.3856309038737455
  Min value:  -5.35233858226595
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 175s 470ms/step - loss: 0.6134 - val_loss: 1.8066 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 41s 471ms/step - loss: 0.3827 - val_loss: 2.0618 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 44s 516ms/step - loss: 0.3584 - val_loss: 2.3161 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 49s 567ms/step - loss: 0.3443 - val_loss: 2.1891 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 43s 496ms/step - loss: 0.3323 - val_loss: 1.9408 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
86/86 ━━━━━━━━━━━━━━━━━━━━ 0s 462ms/step - loss: 0.3247
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
86/86 ━━━━━━━━━━━━━━━━━━━━ 41s 477ms/step - loss: 0.3184 - val_loss: 2.0848 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.12621655
1 day(s) RMSE                      : 0.22828548
1 day(s) R2                        : -0.14177078
1 day(s) Pearson r                 : -0.35684470
1 day(s) QLIKE                     : 0.58543800
3 day(s) MAE                       : 0.12528325
3 day(s) RMSE                      : 0.22673438
3 day(s) R2                        : -0.13499704
3 day(s) Pearson r                 : 0.00180501
3 day(s) QLIKE                     : 0.58395910
5 day(s) MAE                       : 0.12892220
5 day(s) RMSE                      : 0.23054307
5 day(s) R2                        : -0.18678324
5 day(s) Pearson r                 : 0.00912402
5 day(s) QLIKE                     : 0.59614609
10 day(s) MAE                      : 0.13639083
10 day(s) RMSE                     : 0.23796285
10 day(s) R2                       : -0.27647269
10 day(s) Pearson r                : 0.00560534
10 day(s) QLIKE                    : 0.61038971
20 day(s) MAE                      : 0.14013057
20 day(s) RMSE                     : 0.24118429
20 day(s) R2                       : -0.32673251
20 day(s) Pearson r                : 0.00533730
20 day(s) QLIKE                    : 0.59609619
full horizon MAE                   : 0.14013057
full horizon RMSE                  : 0.24118429
full horizon R2                    : -0.32673251
full horizon Pearson r             : 0.00533730
full horizon QLIKE                 : 0.59609619

--- Task 2 ---
1 day(s) MAE                       : 0.03453400
1 day(s) RMSE                      : 0.05623568
1 day(s) R2                        : -0.60539263
1 day(s) Pearson r                 : 0.04542675
1 day(s) QLIKE                     : 9.83683956
3 day(s) MAE                       : 0.03452233
3 day(s) RMSE                      : 0.05623314
3 day(s) R2                        : -0.60484227
3 day(s) Pearson r                 : 0.00169777
3 day(s) QLIKE                     : 11.34376983
5 day(s) MAE                       : 0.03448912
5 day(s) RMSE                      : 0.05622460
5 day(s) R2                        : -0.60327662
5 day(s) Pearson r                 : 0.00159883
5 day(s) QLIKE                     : 13.60663065
10 day(s) MAE                      : 0.03441481
10 day(s) RMSE                     : 0.05620298
10 day(s) R2                       : -0.59986521
10 day(s) Pearson r                : 0.00171224
10 day(s) QLIKE                    : 12.18016071
20 day(s) MAE                      : 0.03437521
20 day(s) RMSE                     : 0.05619073
20 day(s) R2                       : -0.59807948
20 day(s) Pearson r                : 0.00140936
20 day(s) QLIKE                    : 11.31028098
full horizon MAE                   : 0.03437521
full horizon RMSE                  : 0.05619073
full horizon R2                    : -0.59807948
full horizon Pearson r             : 0.00140936
full horizon QLIKE                 : 11.31028098

--- Task 3 ---
1 day(s) MAE                       : 0.74835805
1 day(s) RMSE                      : 0.82834649
1 day(s) R2                        : -4.07989375
1 day(s) Pearson r                 : 0.15503720
1 day(s) QLIKE                     : 3.33582382
3 day(s) MAE                       : 0.75943980
3 day(s) RMSE                      : 0.84184971
3 day(s) R2                        : -4.24631253
3 day(s) Pearson r                 : 0.00191491
3 day(s) QLIKE                     : 6.04949685
5 day(s) MAE                       : 0.76195607
5 day(s) RMSE                      : 0.84488436
5 day(s) R2                        : -4.28299586
5 day(s) Pearson r                 : 0.00121785
5 day(s) QLIKE                     : 7.04444171
10 day(s) MAE                      : 0.76396031
10 day(s) RMSE                     : 0.84728648
10 day(s) R2                       : -4.31034288
10 day(s) Pearson r                : 0.00063474
10 day(s) QLIKE                    : 6.39352012
20 day(s) MAE                      : 0.76500637
20 day(s) RMSE                     : 0.84853616
20 day(s) R2                       : -4.32388057
20 day(s) Pearson r                : 0.00034776
20 day(s) QLIKE                    : 5.22453997
full horizon MAE                   : 0.76500637
full horizon RMSE                  : 0.84853616
full horizon R2                    : -4.32388057
full horizon Pearson r             : 0.00034776
full horizon QLIKE                 : 5.22453997

--- Task 4 ---
1 day(s) MAE                       : 0.03287545
1 day(s) RMSE                      : 0.06210912
1 day(s) R2                        : -0.38074757
1 day(s) Pearson r                 : -0.10938982
1 day(s) QLIKE                     : 2.92223937
3 day(s) MAE                       : 0.03303926
3 day(s) RMSE                      : 0.06226541
3 day(s) R2                        : -0.38749646
3 day(s) Pearson r                 : -0.00585664
3 day(s) QLIKE                     : 3.03322321
5 day(s) MAE                       : 0.03316315
5 day(s) RMSE                      : 0.06236573
5 day(s) R2                        : -0.39159352
5 day(s) Pearson r                 : -0.00274955
5 day(s) QLIKE                     : 5.78207657
10 day(s) MAE                      : 0.03323645
10 day(s) RMSE                     : 0.06244003
10 day(s) R2                       : -0.39403637
10 day(s) Pearson r                : -0.00121593
10 day(s) QLIKE                    : 6.71191457
20 day(s) MAE                      : 0.03325915
20 day(s) RMSE                     : 0.06247653
20 day(s) R2                       : -0.39480471
20 day(s) Pearson r                : -0.00057527
20 day(s) QLIKE                    : 5.24936592
full horizon MAE                   : 0.03325915
full horizon RMSE                  : 0.06247653
full horizon R2                    : -0.39480471
full horizon Pearson r             : -0.00057527
full horizon QLIKE                 : 5.24936592

--- Task 5 ---
1 day(s) MAE                       : 0.00512764
1 day(s) RMSE                      : 0.00780973
1 day(s) R2                        : -0.75770525
1 day(s) Pearson r                 : 0.29094500
1 day(s) QLIKE                     : 8.96221489
3 day(s) MAE                       : 0.00512618
3 day(s) RMSE                      : 0.00780467
3 day(s) R2                        : -0.75542818
3 day(s) Pearson r                 : 0.00326506
3 day(s) QLIKE                     : 10.92019593
5 day(s) MAE                       : 0.00510231
5 day(s) RMSE                      : 0.00771895
5 day(s) R2                        : -0.71707943
5 day(s) Pearson r                 : 0.00067700
5 day(s) QLIKE                     : 12.56349337
10 day(s) MAE                      : 0.00498674
10 day(s) RMSE                     : 0.00705669
10 day(s) R2                       : -0.43508172
10 day(s) Pearson r                : 0.00033249
10 day(s) QLIKE                    : 14.66265984
20 day(s) MAE                      : 0.00525605
20 day(s) RMSE                     : 0.00674359
20 day(s) R2                       : -0.31210955
20 day(s) Pearson r                : 0.00165549
20 day(s) QLIKE                    : 15.79372853
full horizon MAE                   : 0.00525605
full horizon RMSE                  : 0.00674359
full horizon R2                    : -0.31210955
full horizon Pearson r             : 0.00165549
full horizon QLIKE                 : 15.79372853

--- Task 6 ---
1 day(s) MAE                       : 1.29332628
1 day(s) RMSE                      : 1.54448298
1 day(s) R2                        : -0.56112944
1 day(s) Pearson r                 : 0.06653848
1 day(s) QLIKE                     : 0.02770916
3 day(s) MAE                       : 1.57746247
3 day(s) RMSE                      : 1.83645797
3 day(s) R2                        : -1.20638551
3 day(s) Pearson r                 : 0.00553590
3 day(s) QLIKE                     : 0.02907965
5 day(s) MAE                       : 1.80778598
5 day(s) RMSE                      : 2.07595632
5 day(s) R2                        : -1.81790126
5 day(s) Pearson r                 : 0.01127241
5 day(s) QLIKE                     : 0.03008544
10 day(s) MAE                      : 1.87738158
10 day(s) RMSE                     : 2.13972317
10 day(s) R2                       : -1.98731906
10 day(s) Pearson r                : 0.04485066
10 day(s) QLIKE                    : 0.02949967
20 day(s) MAE                      : 2.03368681
20 day(s) RMSE                     : 2.31306671
20 day(s) R2                       : -2.49057002
20 day(s) Pearson r                : 0.07847771
20 day(s) QLIKE                    : 0.03154199
full horizon MAE                   : 2.03368681
full horizon RMSE                  : 2.31306671
full horizon R2                    : -2.49057002
full horizon Pearson r             : 0.07847771
full horizon QLIKE                 : 0.03154199

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/EURUSD/Custom_KAN_LSTM_H20.pkl

Saved y_true min=9.56189e-05, max=2.66457
Saved y_pred min=0.0606639, max=0.117904

=== GOLD | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.253814902458322
  Min value:  -5.250298730157774
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7902797110229687
  Min value:  -5.155379024632684
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5405778347636003
  Min value:  -5.155379024632684
Epoch 001 | phase=1 | train_loss=4.8095 | val_main=0.486445
Epoch 002 | phase=1 | train_loss=4.4237 | val_main=0.486358
Epoch 003 | phase=1 | train_loss=4.2331 | val_main=0.486345
Epoch 004 | phase=1 | train_loss=4.1420 | val_main=0.486436
Epoch 005 | phase=1 | train_loss=4.0132 | val_main=0.486476
Epoch 006 | phase=1 | train_loss=3.9100 | val_main=0.486434
Epoch 007 | phase=1 | train_loss=3.8297 | val_main=0.486468
Epoch 008 | phase=1 | train_loss=3.8065 | val_main=0.486537
Epoch 009 | phase=1 | train_loss=3.7791 | val_main=0.486505
Epoch 010 | phase=1 | train_loss=3.6620 | val_main=0.486419
Epoch 011 | phase=1 | train_loss=3.5839 | val_main=0.486099
Epoch 012 | phase=1 | train_loss=3.5572 | val_main=0.486245
Epoch 013 | phase=1 | train_loss=3.4315 | val_main=0.486175
Epoch 014 | phase=1 | train_loss=3.3788 | val_main=0.486212
Epoch 015 | phase=1 | train_loss=3.2905 | val_main=0.486131
Epoch 016 | phase=0 | train_loss=4.1266 | val_main=0.455466
Epoch 017 | phase=0 | train_loss=3.8001 | val_main=0.398078
Epoch 018 | phase=0 | train_loss=3.4527 | val_main=0.321515
Epoch 019 | phase=0 | train_loss=3.3010 | val_main=0.312507
Epoch 020 | phase=0 | train_loss=3.1889 | val_main=0.290006
Epoch 021 | phase=0 | train_loss=3.0173 | val_main=0.286137
Epoch 022 | phase=0 | train_loss=2.8938 | val_main=0.274189
Epoch 023 | phase=0 | train_loss=2.8323 | val_main=0.267696
Epoch 024 | phase=0 | train_loss=2.7501 | val_main=0.259894
Epoch 025 | phase=0 | train_loss=2.7323 | val_main=0.249145
Epoch 026 | phase=0 | train_loss=2.5545 | val_main=0.263837
Epoch 027 | phase=0 | train_loss=2.5070 | val_main=0.251098
Epoch 028 | phase=0 | train_loss=2.3893 | val_main=0.253022
Epoch 029 | phase=0 | train_loss=2.3626 | val_main=0.254703
Epoch 030 | phase=0 | train_loss=2.3573 | val_main=0.284536
Epoch 031 | phase=2 | train_loss=0.1552 | val_main=0.274398
Epoch 032 | phase=2 | train_loss=0.1314 | val_main=0.279163
Epoch 033 | phase=2 | train_loss=0.1260 | val_main=0.273155
Epoch 034 | phase=2 | train_loss=0.1120 | val_main=0.279139
Epoch 035 | phase=2 | train_loss=0.1098 | val_main=0.292047
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.30937749
1 day(s) RMSE                      : 0.59588361
1 day(s) R2                        : 0.17315826
1 day(s) Pearson r                 : 0.44755197
1 day(s) QLIKE                     : 0.51382151
full horizon MAE                   : 0.30937749
full horizon RMSE                  : 0.59588361
full horizon R2                    : 0.17315826
full horizon Pearson r             : 0.44755197
full horizon QLIKE                 : 0.51382151

--- Task 2 ---
1 day(s) MAE                       : 0.03767039
1 day(s) RMSE                      : 0.05816697
1 day(s) R2                        : -0.60826871
1 day(s) Pearson r                 : 0.21189069
1 day(s) QLIKE                     : 15.20939514
full horizon MAE                   : 0.03767039
full horizon RMSE                  : 0.05816697
full horizon R2                    : -0.60826871
full horizon Pearson r             : 0.21189069
full horizon QLIKE                 : 15.20939514

--- Task 3 ---
1 day(s) MAE                       : 0.31569860
1 day(s) RMSE                      : 0.47907707
1 day(s) R2                        : -1.19345699
1 day(s) Pearson r                 : -0.23258487
1 day(s) QLIKE                     : 10.38697984
full horizon MAE                   : 0.31569860
full horizon RMSE                  : 0.47907707
full horizon R2                    : -1.19345699
full horizon Pearson r             : -0.23258487
full horizon QLIKE                 : 10.38697984

--- Task 4 ---
1 day(s) MAE                       : 0.12188845
1 day(s) RMSE                      : 0.25595767
1 day(s) R2                        : -0.27888842
1 day(s) Pearson r                 : -0.16293585
1 day(s) QLIKE                     : 10.30833077
full horizon MAE                   : 0.12188845
full horizon RMSE                  : 0.25595767
full horizon R2                    : -0.27888842
full horizon Pearson r             : -0.16293585
full horizon QLIKE                 : 10.30833077

--- Task 5 ---
1 day(s) MAE                       : 0.04075025
1 day(s) RMSE                      : 0.05984030
1 day(s) R2                        : -0.78453498
1 day(s) Pearson r                 : -0.01518305
1 day(s) QLIKE                     : 14.64244544
full horizon MAE                   : 0.04075025
full horizon RMSE                  : 0.05984030
full horizon R2                    : -0.78453498
full horizon Pearson r             : -0.01518305
full horizon QLIKE                 : 14.64244544

--- Task 6 ---
1 day(s) MAE                       : 0.97158885
1 day(s) RMSE                      : 1.92563735
1 day(s) R2                        : -10.59983834
1 day(s) Pearson r                 : -0.06069739
1 day(s) QLIKE                     : 0.04418244
full horizon MAE                   : 0.97158885
full horizon RMSE                  : 1.92563735
full horizon R2                    : -10.59983834
full horizon Pearson r             : -0.06069739
full horizon QLIKE                 : 0.04418244

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_H1.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00456254, max=2.08821

=== GOLD | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 5534
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.253814902458322
  Min value:  -5.250298730157774
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7902797110229687
  Min value:  -5.155379024632684
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5405778347636003
  Min value:  -5.155379024632684
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 170s 278ms/step - loss: 0.9819 - val_loss: 0.4949 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 24s 188ms/step - loss: 0.9328 - val_loss: 0.4904 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 24s 188ms/step - loss: 0.9376 - val_loss: 0.5537 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 24s 190ms/step - loss: 0.9440 - val_loss: 0.4549 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 24s 190ms/step - loss: 0.9471 - val_loss: 0.4961 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 23s 188ms/step - loss: 0.9538 - val_loss: 0.5113 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 26s 210ms/step - loss: 0.9184 - val_loss: 0.4754 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 30s 237ms/step - loss: 0.9252 - val_loss: 0.4880 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 0s 220ms/step - loss: 0.9374
Epoch 9: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
125/125 ━━━━━━━━━━━━━━━━━━━━ 28s 226ms/step - loss: 0.9429 - val_loss: 0.4898 - learning_rate: 5.0000e-04
Epoch 9: early stopping
Restoring model weights from the end of the best epoch: 4.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.40801242
1 day(s) RMSE                      : 0.68180273
1 day(s) R2                        : -0.08247278
1 day(s) Pearson r                 : 0.35730863
1 day(s) QLIKE                     : 0.49933048
full horizon MAE                   : 0.40801242
full horizon RMSE                  : 0.68180273
full horizon R2                    : -0.08247278
full horizon Pearson r             : 0.35730863
full horizon QLIKE                 : 0.49933048

--- Task 2 ---
1 day(s) MAE                       : 0.04010084
1 day(s) RMSE                      : 0.06087685
1 day(s) R2                        : -0.76161096
1 day(s) Pearson r                 : 0.08691458
1 day(s) QLIKE                     : 10.86233446
full horizon MAE                   : 0.04010084
full horizon RMSE                  : 0.06087685
full horizon R2                    : -0.76161096
full horizon Pearson r             : 0.08691458
full horizon QLIKE                 : 10.86233446

--- Task 3 ---
1 day(s) MAE                       : 0.42627181
1 day(s) RMSE                      : 0.51775649
1 day(s) R2                        : -1.56194297
1 day(s) Pearson r                 : 0.15360883
1 day(s) QLIKE                     : 3.66665696
full horizon MAE                   : 0.42627181
full horizon RMSE                  : 0.51775649
full horizon R2                    : -1.56194297
full horizon Pearson r             : 0.15360883
full horizon QLIKE                 : 3.66665696

--- Task 4 ---
1 day(s) MAE                       : 0.12105574
1 day(s) RMSE                      : 0.25655662
1 day(s) R2                        : -0.28488070
1 day(s) Pearson r                 : -0.03855730
1 day(s) QLIKE                     : 2.17782323
full horizon MAE                   : 0.12105574
full horizon RMSE                  : 0.25655662
full horizon R2                    : -0.28488070
full horizon Pearson r             : -0.03855730
full horizon QLIKE                 : 2.17782323

--- Task 5 ---
1 day(s) MAE                       : 0.03992821
1 day(s) RMSE                      : 0.05996898
1 day(s) R2                        : -0.79221839
1 day(s) Pearson r                 : 0.24440235
1 day(s) QLIKE                     : 5.85887460
full horizon MAE                   : 0.03992821
full horizon RMSE                  : 0.05996898
full horizon R2                    : -0.79221839
full horizon Pearson r             : 0.24440235
full horizon QLIKE                 : 5.85887460

--- Task 6 ---
1 day(s) MAE                       : 0.66812460
1 day(s) RMSE                      : 0.80557110
1 day(s) R2                        : -1.03006846
1 day(s) Pearson r                 : 0.02896732
1 day(s) QLIKE                     : 0.02469974
full horizon MAE                   : 0.66812460
full horizon RMSE                  : 0.80557110
full horizon R2                    : -1.03006846
full horizon Pearson r             : 0.02896732
full horizon QLIKE                 : 0.02469974

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.273148, max=0.979725

=== GOLD | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.254005191594221
  Min value:  -5.250623802304656
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.790672308331636
  Min value:  -5.15537980838342
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5409449791338365
  Min value:  -5.15537980838342
Epoch 001 | phase=1 | train_loss=4.8453 | val_main=0.486215
Epoch 002 | phase=1 | train_loss=4.4619 | val_main=0.486261
Epoch 003 | phase=1 | train_loss=4.2445 | val_main=0.486245
Epoch 004 | phase=1 | train_loss=4.1387 | val_main=0.486258
Epoch 005 | phase=1 | train_loss=4.0148 | val_main=0.486243
Epoch 006 | phase=1 | train_loss=3.8990 | val_main=0.486245
Epoch 007 | phase=1 | train_loss=3.8274 | val_main=0.486262
Epoch 008 | phase=1 | train_loss=3.8064 | val_main=0.486269
Epoch 009 | phase=1 | train_loss=3.7854 | val_main=0.486322
Epoch 010 | phase=1 | train_loss=3.6809 | val_main=0.486266
Epoch 011 | phase=1 | train_loss=3.6880 | val_main=0.486267
Epoch 012 | phase=1 | train_loss=3.5869 | val_main=0.486270
Epoch 013 | phase=1 | train_loss=3.4723 | val_main=0.486287
Epoch 014 | phase=1 | train_loss=3.4259 | val_main=0.486319
Epoch 015 | phase=1 | train_loss=3.3217 | val_main=0.486316
Epoch 016 | phase=0 | train_loss=4.1412 | val_main=0.466364
Epoch 017 | phase=0 | train_loss=4.0098 | val_main=0.465184
Epoch 018 | phase=0 | train_loss=3.8002 | val_main=0.463814
Epoch 019 | phase=0 | train_loss=3.5507 | val_main=0.426784
Epoch 020 | phase=0 | train_loss=3.3307 | val_main=0.385905
Epoch 021 | phase=0 | train_loss=3.1569 | val_main=0.353550
Epoch 022 | phase=0 | train_loss=2.9818 | val_main=0.329787
Epoch 023 | phase=0 | train_loss=2.8774 | val_main=0.303995
Epoch 024 | phase=0 | train_loss=2.7591 | val_main=0.291238
Epoch 025 | phase=0 | train_loss=2.7347 | val_main=0.277308
Epoch 026 | phase=0 | train_loss=2.6591 | val_main=0.271639
Epoch 027 | phase=0 | train_loss=2.5987 | val_main=0.269702
Epoch 028 | phase=0 | train_loss=2.4902 | val_main=0.263617
Epoch 029 | phase=0 | train_loss=2.5356 | val_main=0.254756
Epoch 030 | phase=0 | train_loss=2.4211 | val_main=0.252715
Epoch 031 | phase=2 | train_loss=0.1827 | val_main=0.257835
Epoch 032 | phase=2 | train_loss=0.1729 | val_main=0.260639
Epoch 033 | phase=2 | train_loss=0.1664 | val_main=0.255945
Epoch 034 | phase=2 | train_loss=0.1606 | val_main=0.252781
Epoch 035 | phase=2 | train_loss=0.1523 | val_main=0.257684
Epoch 036 | phase=2 | train_loss=0.1459 | val_main=0.255870
Epoch 037 | phase=2 | train_loss=0.1426 | val_main=0.259780
Epoch 038 | phase=2 | train_loss=0.1376 | val_main=0.258463
Epoch 039 | phase=2 | train_loss=0.1320 | val_main=0.263569
Epoch 040 | phase=2 | train_loss=0.1293 | val_main=0.263713
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.30624593
1 day(s) RMSE                      : 0.57106105
1 day(s) R2                        : 0.24061049
1 day(s) Pearson r                 : 0.50846614
1 day(s) QLIKE                     : 0.43266136
3 day(s) MAE                       : 0.30688224
3 day(s) RMSE                      : 0.58262028
3 day(s) R2                        : 0.21021676
3 day(s) Pearson r                 : 0.49192331
3 day(s) QLIKE                     : 0.46900276
5 day(s) MAE                       : 0.31118324
5 day(s) RMSE                      : 0.58999464
5 day(s) R2                        : 0.19240147
5 day(s) Pearson r                 : 0.47875836
5 day(s) QLIKE                     : 0.48800106
full horizon MAE                   : 0.31118324
full horizon RMSE                  : 0.58999464
full horizon R2                    : 0.19240147
full horizon Pearson r             : 0.47875836
full horizon QLIKE                 : 0.48800106

--- Task 2 ---
1 day(s) MAE                       : 0.03859776
1 day(s) RMSE                      : 0.05800627
1 day(s) R2                        : -0.59939448
1 day(s) Pearson r                 : 0.03273520
1 day(s) QLIKE                     : 16.12666359
3 day(s) MAE                       : 0.03876399
3 day(s) RMSE                      : 0.05825532
3 day(s) R2                        : -0.61418451
3 day(s) Pearson r                 : 0.00749837
3 day(s) QLIKE                     : 16.00127841
5 day(s) MAE                       : 0.03898686
5 day(s) RMSE                      : 0.05843149
5 day(s) R2                        : -0.62498962
5 day(s) Pearson r                 : 0.01286188
5 day(s) QLIKE                     : 16.08296578
full horizon MAE                   : 0.03898686
full horizon RMSE                  : 0.05843149
full horizon R2                    : -0.62498962
full horizon Pearson r             : 0.01286188
full horizon QLIKE                 : 16.08296578

--- Task 3 ---
1 day(s) MAE                       : 0.32218820
1 day(s) RMSE                      : 0.47121069
1 day(s) R2                        : -1.12201589
1 day(s) Pearson r                 : -0.28189649
1 day(s) QLIKE                     : 8.99346361
3 day(s) MAE                       : 0.31321132
3 day(s) RMSE                      : 0.46065969
3 day(s) R2                        : -1.02846555
3 day(s) Pearson r                 : -0.26209868
3 day(s) QLIKE                     : 8.91067406
5 day(s) MAE                       : 0.31150593
5 day(s) RMSE                      : 0.46173986
5 day(s) R2                        : -1.03828651
5 day(s) Pearson r                 : -0.26026291
5 day(s) QLIKE                     : 8.76303588
full horizon MAE                   : 0.31150593
full horizon RMSE                  : 0.46173986
full horizon R2                    : -1.03828651
full horizon Pearson r             : -0.26026291
full horizon QLIKE                 : 8.76303588

--- Task 4 ---
1 day(s) MAE                       : 0.12266798
1 day(s) RMSE                      : 0.25652902
1 day(s) R2                        : -0.28460428
1 day(s) Pearson r                 : -0.14410528
1 day(s) QLIKE                     : 9.64751757
3 day(s) MAE                       : 0.12266684
3 day(s) RMSE                      : 0.25647125
3 day(s) R2                        : -0.28417005
3 day(s) Pearson r                 : -0.13973070
3 day(s) QLIKE                     : 9.55576185
5 day(s) MAE                       : 0.12262754
5 day(s) RMSE                      : 0.25651424
5 day(s) R2                        : -0.28473536
5 day(s) Pearson r                 : -0.13884136
5 day(s) QLIKE                     : 9.58383400
full horizon MAE                   : 0.12262754
full horizon RMSE                  : 0.25651424
full horizon R2                    : -0.28473536
full horizon Pearson r             : -0.13884136
full horizon QLIKE                 : 9.58383400

--- Task 5 ---
1 day(s) MAE                       : 0.03931979
1 day(s) RMSE                      : 0.05751320
1 day(s) R2                        : -0.64843829
1 day(s) Pearson r                 : 0.09119713
1 day(s) QLIKE                     : 16.15975286
3 day(s) MAE                       : 0.03948505
3 day(s) RMSE                      : 0.05761201
3 day(s) R2                        : -0.64709866
3 day(s) Pearson r                 : 0.08992802
3 day(s) QLIKE                     : 16.19944717
5 day(s) MAE                       : 0.03968304
5 day(s) RMSE                      : 0.05782702
5 day(s) R2                        : -0.65194128
5 day(s) Pearson r                 : 0.08773869
5 day(s) QLIKE                     : 16.21979674
full horizon MAE                   : 0.03968304
full horizon RMSE                  : 0.05782702
full horizon R2                    : -0.65194128
full horizon Pearson r             : 0.08773869
full horizon QLIKE                 : 16.21979674

--- Task 6 ---
1 day(s) MAE                       : 0.94327033
1 day(s) RMSE                      : 1.92860666
1 day(s) R2                        : -10.63563947
1 day(s) Pearson r                 : -0.06782363
1 day(s) QLIKE                     : 0.04258075
3 day(s) MAE                       : 0.94821628
3 day(s) RMSE                      : 1.94877123
3 day(s) R2                        : -10.85781622
3 day(s) Pearson r                 : -0.07384000
3 day(s) QLIKE                     : 0.04297525
5 day(s) MAE                       : 0.94726449
5 day(s) RMSE                      : 1.95748450
5 day(s) R2                        : -10.94429834
5 day(s) Pearson r                 : -0.07532901
5 day(s) QLIKE                     : 0.04290490
full horizon MAE                   : 0.94726449
full horizon RMSE                  : 1.95748450
full horizon R2                    : -10.94429834
full horizon Pearson r             : -0.07532901
full horizon QLIKE                 : 0.04290490

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_H5.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00236784, max=2.65448

=== GOLD | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 5534
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.254005191594221
  Min value:  -5.250623802304656
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.790672308331636
  Min value:  -5.15537980838342
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5409449791338365
  Min value:  -5.15537980838342
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 185s 339ms/step - loss: 0.6295 - val_loss: 0.4907 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 31s 248ms/step - loss: 0.4689 - val_loss: 0.5174 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 31s 246ms/step - loss: 0.4561 - val_loss: 0.4856 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 31s 246ms/step - loss: 0.4497 - val_loss: 0.6141 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 31s 248ms/step - loss: 0.4505 - val_loss: 0.5311 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 32s 253ms/step - loss: 0.4438 - val_loss: 0.5002 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 32s 256ms/step - loss: 0.4457 - val_loss: 0.5615 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 0s 271ms/step - loss: 0.4325
Epoch 8: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
125/125 ━━━━━━━━━━━━━━━━━━━━ 35s 279ms/step - loss: 0.4444 - val_loss: 0.5347 - learning_rate: 5.0000e-04
Epoch 8: early stopping
Restoring model weights from the end of the best epoch: 3.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.43206667
1 day(s) RMSE                      : 0.71059443
1 day(s) R2                        : -0.17582615
1 day(s) Pearson r                 : 0.40932228
1 day(s) QLIKE                     : 0.48399667
3 day(s) MAE                       : 0.44194800
3 day(s) RMSE                      : 0.71794698
3 day(s) R2                        : -0.19928239
3 day(s) Pearson r                 : 0.33819012
3 day(s) QLIKE                     : 0.49724586
5 day(s) MAE                       : 0.45404049
5 day(s) RMSE                      : 0.73130276
5 day(s) R2                        : -0.24077720
5 day(s) Pearson r                 : 0.31496787
5 day(s) QLIKE                     : 0.50507291
full horizon MAE                   : 0.45404049
full horizon RMSE                  : 0.73130276
full horizon R2                    : -0.24077720
full horizon Pearson r             : 0.31496787
full horizon QLIKE                 : 0.50507291

--- Task 2 ---
1 day(s) MAE                       : 0.04020814
1 day(s) RMSE                      : 0.06098747
1 day(s) R2                        : -0.76801911
1 day(s) Pearson r                 : 0.12025653
1 day(s) QLIKE                     : 10.02086693
3 day(s) MAE                       : 0.04025540
3 day(s) RMSE                      : 0.06100942
3 day(s) R2                        : -0.77041775
3 day(s) Pearson r                 : 0.08470533
3 day(s) QLIKE                     : 11.61569066
5 day(s) MAE                       : 0.04030857
5 day(s) RMSE                      : 0.06103612
5 day(s) R2                        : -0.77308897
5 day(s) Pearson r                 : 0.06528350
5 day(s) QLIKE                     : 10.47297948
full horizon MAE                   : 0.04030857
full horizon RMSE                  : 0.06103612
full horizon R2                    : -0.77308897
full horizon Pearson r             : 0.06528350
full horizon QLIKE                 : 10.47297948

--- Task 3 ---
1 day(s) MAE                       : 0.40612354
1 day(s) RMSE                      : 0.46610950
1 day(s) R2                        : -1.07631995
1 day(s) Pearson r                 : 0.10193709
1 day(s) QLIKE                     : 3.55901107
3 day(s) MAE                       : 0.53266278
3 day(s) RMSE                      : 0.64209813
3 day(s) R2                        : -2.94103261
3 day(s) Pearson r                 : 0.09341926
3 day(s) QLIKE                     : 9.12673219
5 day(s) MAE                       : 0.58925515
5 day(s) RMSE                      : 0.69280139
5 day(s) R2                        : -3.58868228
5 day(s) Pearson r                 : 0.08227731
5 day(s) QLIKE                     : 8.54226127
full horizon MAE                   : 0.58925515
full horizon RMSE                  : 0.69280139
full horizon R2                    : -3.58868228
full horizon Pearson r             : 0.08227731
full horizon QLIKE                 : 8.54226127

--- Task 4 ---
1 day(s) MAE                       : 0.12250138
1 day(s) RMSE                      : 0.25716060
1 day(s) R2                        : -0.29093752
1 day(s) Pearson r                 : 0.01238657
1 day(s) QLIKE                     : 2.04339468
3 day(s) MAE                       : 0.12312037
3 day(s) RMSE                      : 0.25721707
3 day(s) R2                        : -0.29164964
3 day(s) Pearson r                 : -0.00261888
3 day(s) QLIKE                     : 2.24709272
5 day(s) MAE                       : 0.12348446
5 day(s) RMSE                      : 0.25713565
5 day(s) R2                        : -0.29096746
5 day(s) Pearson r                 : -0.00733922
5 day(s) QLIKE                     : 2.27108617
full horizon MAE                   : 0.12348446
full horizon RMSE                  : 0.25713565
full horizon R2                    : -0.29096746
full horizon Pearson r             : -0.00733922
full horizon QLIKE                 : 2.27108617

--- Task 5 ---
1 day(s) MAE                       : 0.03995632
1 day(s) RMSE                      : 0.06001259
1 day(s) R2                        : -0.79482590
1 day(s) Pearson r                 : 0.22361798
1 day(s) QLIKE                     : 6.10222592
3 day(s) MAE                       : 0.04005451
3 day(s) RMSE                      : 0.06015792
3 day(s) R2                        : -0.79588768
3 day(s) Pearson r                 : 0.12179638
3 day(s) QLIKE                     : 12.63240604
5 day(s) MAE                       : 0.04014805
5 day(s) RMSE                      : 0.06029767
5 day(s) R2                        : -0.79611487
5 day(s) Pearson r                 : 0.09282771
5 day(s) QLIKE                     : 10.08221868
full horizon MAE                   : 0.04014805
full horizon RMSE                  : 0.06029767
full horizon R2                    : -0.79611487
full horizon Pearson r             : 0.09282771
full horizon QLIKE                 : 10.08221868

--- Task 6 ---
1 day(s) MAE                       : 0.68837781
1 day(s) RMSE                      : 0.83233554
1 day(s) R2                        : -1.16720408
1 day(s) Pearson r                 : -0.08158636
1 day(s) QLIKE                     : 0.02485084
3 day(s) MAE                       : 0.71045682
3 day(s) RMSE                      : 0.85609521
3 day(s) R2                        : -1.28837467
3 day(s) Pearson r                 : -0.07304605
3 day(s) QLIKE                     : 0.02518731
5 day(s) MAE                       : 0.69918520
5 day(s) RMSE                      : 0.84706948
5 day(s) R2                        : -1.23667070
5 day(s) Pearson r                 : -0.06897188
5 day(s) QLIKE                     : 0.02440826
full horizon MAE                   : 0.69918520
full horizon RMSE                  : 0.84706948
full horizon R2                    : -1.23667070
full horizon Pearson r             : -0.06897188
full horizon QLIKE                 : 0.02440826

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.186091, max=0.642617

=== GOLD | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.254200938794579
  Min value:  -5.254244390683287
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7918153902171274
  Min value:  -5.155380789277184
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5419191122017313
  Min value:  -5.155380789277184
Epoch 001 | phase=1 | train_loss=4.8597 | val_main=0.487606
Epoch 002 | phase=1 | train_loss=4.4647 | val_main=0.487826
Epoch 003 | phase=1 | train_loss=4.2466 | val_main=0.487803
Epoch 004 | phase=1 | train_loss=4.1432 | val_main=0.487805
Epoch 005 | phase=1 | train_loss=4.0099 | val_main=0.487810
Epoch 006 | phase=1 | train_loss=3.8979 | val_main=0.487801
Epoch 007 | phase=1 | train_loss=3.8308 | val_main=0.487822
Epoch 008 | phase=1 | train_loss=3.7954 | val_main=0.487743
Epoch 009 | phase=1 | train_loss=3.8122 | val_main=0.487778
Epoch 010 | phase=1 | train_loss=3.6835 | val_main=0.487863
Epoch 011 | phase=1 | train_loss=3.6653 | val_main=0.487775
Epoch 012 | phase=1 | train_loss=3.6700 | val_main=0.487901
Epoch 013 | phase=1 | train_loss=3.5210 | val_main=0.488104
Epoch 014 | phase=1 | train_loss=3.4407 | val_main=0.487977
Epoch 015 | phase=1 | train_loss=3.4414 | val_main=0.488074
Epoch 016 | phase=0 | train_loss=4.2478 | val_main=0.467706
Epoch 017 | phase=0 | train_loss=4.1510 | val_main=0.462619
Epoch 018 | phase=0 | train_loss=4.0065 | val_main=0.467026
Epoch 019 | phase=0 | train_loss=3.8065 | val_main=0.443451
Epoch 020 | phase=0 | train_loss=3.6277 | val_main=0.399249
Epoch 021 | phase=0 | train_loss=3.4504 | val_main=0.382608
Epoch 022 | phase=0 | train_loss=3.3354 | val_main=0.360200
Epoch 023 | phase=0 | train_loss=3.1891 | val_main=0.339906
Epoch 024 | phase=0 | train_loss=3.0212 | val_main=0.322760
Epoch 025 | phase=0 | train_loss=3.0170 | val_main=0.309964
Epoch 026 | phase=0 | train_loss=2.8825 | val_main=0.291942
Epoch 027 | phase=0 | train_loss=2.8326 | val_main=0.280712
Epoch 028 | phase=0 | train_loss=2.7493 | val_main=0.274371
Epoch 029 | phase=0 | train_loss=2.6625 | val_main=0.263521
Epoch 030 | phase=0 | train_loss=2.6681 | val_main=0.262418
Epoch 031 | phase=2 | train_loss=0.2141 | val_main=0.260010
Epoch 032 | phase=2 | train_loss=0.1979 | val_main=0.259481
Epoch 033 | phase=2 | train_loss=0.1910 | val_main=0.259308
Epoch 034 | phase=2 | train_loss=0.1811 | val_main=0.268092
Epoch 035 | phase=2 | train_loss=0.1761 | val_main=0.258075
Epoch 036 | phase=2 | train_loss=0.1694 | val_main=0.264835
Epoch 037 | phase=2 | train_loss=0.1649 | val_main=0.263034
Epoch 038 | phase=2 | train_loss=0.1605 | val_main=0.259285
Epoch 039 | phase=2 | train_loss=0.1553 | val_main=0.263563
Epoch 040 | phase=2 | train_loss=0.1517 | val_main=0.263600
Epoch 041 | phase=2 | train_loss=0.1493 | val_main=0.268854
Epoch 042 | phase=2 | train_loss=0.1483 | val_main=0.268007
Epoch 043 | phase=2 | train_loss=0.1446 | val_main=0.264932
Epoch 044 | phase=2 | train_loss=0.1437 | val_main=0.276412
Epoch 045 | phase=2 | train_loss=0.1391 | val_main=0.272636
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.29360863
1 day(s) RMSE                      : 0.54910075
1 day(s) R2                        : 0.29789255
1 day(s) Pearson r                 : 0.56564221
1 day(s) QLIKE                     : 0.46071299
3 day(s) MAE                       : 0.29934640
3 day(s) RMSE                      : 0.56427540
3 day(s) R2                        : 0.25916932
3 day(s) Pearson r                 : 0.53168179
3 day(s) QLIKE                     : 0.46458633
5 day(s) MAE                       : 0.30527375
5 day(s) RMSE                      : 0.57499768
5 day(s) R2                        : 0.23293606
5 day(s) Pearson r                 : 0.50964844
5 day(s) QLIKE                     : 0.47451782
10 day(s) MAE                      : 0.31790361
10 day(s) RMSE                     : 0.58755326
10 day(s) R2                       : 0.20800525
10 day(s) Pearson r                : 0.48846445
10 day(s) QLIKE                    : 0.50967996
full horizon MAE                   : 0.31790361
full horizon RMSE                  : 0.58755326
full horizon R2                    : 0.20800525
full horizon Pearson r             : 0.48846445
full horizon QLIKE                 : 0.50967996

--- Task 2 ---
1 day(s) MAE                       : 0.03845333
1 day(s) RMSE                      : 0.05787924
1 day(s) R2                        : -0.59239671
1 day(s) Pearson r                 : 0.06653659
1 day(s) QLIKE                     : 17.07209622
3 day(s) MAE                       : 0.03870939
3 day(s) RMSE                      : 0.05832744
3 day(s) R2                        : -0.61818348
3 day(s) Pearson r                 : 0.03789019
3 day(s) QLIKE                     : 17.07478202
5 day(s) MAE                       : 0.03875479
5 day(s) RMSE                      : 0.05838072
5 day(s) R2                        : -0.62216714
5 day(s) Pearson r                 : 0.03804032
5 day(s) QLIKE                     : 17.12412113
10 day(s) MAE                      : 0.03897190
10 day(s) RMSE                     : 0.05949218
10 day(s) R2                       : -0.68808215
10 day(s) Pearson r                : 0.05820633
10 day(s) QLIKE                    : 17.06791371
full horizon MAE                   : 0.03897190
full horizon RMSE                  : 0.05949218
full horizon R2                    : -0.68808215
full horizon Pearson r             : 0.05820633
full horizon QLIKE                 : 17.06791371

--- Task 3 ---
1 day(s) MAE                       : 0.32123822
1 day(s) RMSE                      : 0.47028574
1 day(s) R2                        : -1.11369336
1 day(s) Pearson r                 : -0.20389476
1 day(s) QLIKE                     : 8.33558087
3 day(s) MAE                       : 0.31618882
3 day(s) RMSE                      : 0.46758947
3 day(s) R2                        : -1.08995366
3 day(s) Pearson r                 : -0.20020862
3 day(s) QLIKE                     : 8.27873845
5 day(s) MAE                       : 0.31730235
5 day(s) RMSE                      : 0.46995817
5 day(s) R2                        : -1.11148937
5 day(s) Pearson r                 : -0.20227173
5 day(s) QLIKE                     : 8.18226782
10 day(s) MAE                      : 0.31971297
10 day(s) RMSE                     : 0.46889605
10 day(s) R2                       : -1.11376202
10 day(s) Pearson r                : -0.20462423
10 day(s) QLIKE                    : 8.13434322
full horizon MAE                   : 0.31971297
full horizon RMSE                  : 0.46889605
full horizon R2                    : -1.11376202
full horizon Pearson r             : -0.20462423
full horizon QLIKE                 : 8.13434322

--- Task 4 ---
1 day(s) MAE                       : 0.12365697
1 day(s) RMSE                      : 0.25683762
1 day(s) R2                        : -0.28769687
1 day(s) Pearson r                 : -0.10019129
1 day(s) QLIKE                     : 9.00004823
3 day(s) MAE                       : 0.12375450
3 day(s) RMSE                      : 0.25674392
3 day(s) R2                        : -0.28690208
3 day(s) Pearson r                 : -0.09109618
3 day(s) QLIKE                     : 9.00581733
5 day(s) MAE                       : 0.12364490
5 day(s) RMSE                      : 0.25668043
5 day(s) R2                        : -0.28640058
5 day(s) Pearson r                 : -0.08332395
5 day(s) QLIKE                     : 9.04784459
10 day(s) MAE                      : 0.12299866
10 day(s) RMSE                     : 0.25576006
10 day(s) R2                       : -0.28630420
10 day(s) Pearson r                : -0.08520947
10 day(s) QLIKE                    : 9.00207557
full horizon MAE                   : 0.12299866
full horizon RMSE                  : 0.25576006
full horizon R2                    : -0.28630420
full horizon Pearson r             : -0.08520947
full horizon QLIKE                 : 9.00207557

--- Task 5 ---
1 day(s) MAE                       : 0.04082043
1 day(s) RMSE                      : 0.05998545
1 day(s) R2                        : -0.79320311
1 day(s) Pearson r                 : 0.00461281
1 day(s) QLIKE                     : 16.01003543
3 day(s) MAE                       : 0.04068012
3 day(s) RMSE                      : 0.05978835
3 day(s) R2                        : -0.77389015
3 day(s) Pearson r                 : 0.01317244
3 day(s) QLIKE                     : 16.13536875
5 day(s) MAE                       : 0.04077408
5 day(s) RMSE                      : 0.05992991
5 day(s) R2                        : -0.77427256
5 day(s) Pearson r                 : 0.01662860
5 day(s) QLIKE                     : 16.16940756
10 day(s) MAE                      : 0.04080745
10 day(s) RMSE                     : 0.06009523
10 day(s) R2                       : -0.76390668
10 day(s) Pearson r                : 0.01847634
10 day(s) QLIKE                    : 16.22218721
full horizon MAE                   : 0.04080745
full horizon RMSE                  : 0.06009523
full horizon R2                    : -0.76390668
full horizon Pearson r             : 0.01847634
full horizon QLIKE                 : 16.22218721

--- Task 6 ---
1 day(s) MAE                       : 1.03990013
1 day(s) RMSE                      : 2.10210271
1 day(s) R2                        : -12.82327021
1 day(s) Pearson r                 : -0.06105258
1 day(s) QLIKE                     : 0.04843702
3 day(s) MAE                       : 1.04135044
3 day(s) RMSE                      : 2.10568548
3 day(s) R2                        : -12.84426799
3 day(s) Pearson r                 : -0.06879452
3 day(s) QLIKE                     : 0.04869126
5 day(s) MAE                       : 1.04461871
5 day(s) RMSE                      : 2.12246456
5 day(s) R2                        : -13.04251420
5 day(s) Pearson r                 : -0.07317854
5 day(s) QLIKE                     : 0.04896127
10 day(s) MAE                      : 1.04698649
10 day(s) RMSE                     : 2.15755178
10 day(s) R2                       : -13.41787317
10 day(s) Pearson r                : -0.07981985
10 day(s) QLIKE                    : 0.04918642
full horizon MAE                   : 1.04698649
full horizon RMSE                  : 2.15755178
full horizon R2                    : -13.41787317
full horizon Pearson r             : -0.07981985
full horizon QLIKE                 : 0.04918642

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_H10.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.00299108, max=3.8361

=== GOLD | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 5534
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.254200938794579
  Min value:  -5.254244390683287
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7918153902171274
  Min value:  -5.155380789277184
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5419191122017313
  Min value:  -5.155380789277184
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 190s 364ms/step - loss: 0.5710 - val_loss: 0.5149 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 39s 315ms/step - loss: 0.4048 - val_loss: 0.6005 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 39s 313ms/step - loss: 0.3923 - val_loss: 0.5700 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 39s 313ms/step - loss: 0.3842 - val_loss: 0.6132 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 39s 312ms/step - loss: 0.3787 - val_loss: 0.6285 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 0s 305ms/step - loss: 0.3689
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
125/125 ━━━━━━━━━━━━━━━━━━━━ 39s 314ms/step - loss: 0.3781 - val_loss: 0.6857 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.42404226
1 day(s) RMSE                      : 0.71976519
1 day(s) R2                        : -0.20637186
1 day(s) Pearson r                 : 0.45772640
1 day(s) QLIKE                     : 0.52656784
3 day(s) MAE                       : 0.43106096
3 day(s) RMSE                      : 0.72779656
3 day(s) R2                        : -0.23241425
3 day(s) Pearson r                 : 0.06627836
3 day(s) QLIKE                     : 0.53470351
5 day(s) MAE                       : 0.44396251
5 day(s) RMSE                      : 0.74036189
5 day(s) R2                        : -0.27170826
5 day(s) Pearson r                 : 0.04071285
5 day(s) QLIKE                     : 0.53896608
10 day(s) MAE                      : 0.45015701
10 day(s) RMSE                     : 0.74803716
10 day(s) R2                       : -0.28373141
10 day(s) Pearson r                : 0.06546412
10 day(s) QLIKE                    : 0.53658728
full horizon MAE                   : 0.45015701
full horizon RMSE                  : 0.74803716
full horizon R2                    : -0.28373141
full horizon Pearson r             : 0.06546412
full horizon QLIKE                 : 0.53658728

--- Task 2 ---
1 day(s) MAE                       : 0.04022236
1 day(s) RMSE                      : 0.06100458
1 day(s) R2                        : -0.76901104
1 day(s) Pearson r                 : 0.09617776
1 day(s) QLIKE                     : 7.88909217
3 day(s) MAE                       : 0.04026947
3 day(s) RMSE                      : 0.06102485
3 day(s) R2                        : -0.77131361
3 day(s) Pearson r                 : 0.02318208
3 day(s) QLIKE                     : 12.77419191
5 day(s) MAE                       : 0.04031703
5 day(s) RMSE                      : 0.06104540
5 day(s) R2                        : -0.77362771
5 day(s) Pearson r                 : 0.01597958
5 day(s) QLIKE                     : 11.38443393
10 day(s) MAE                      : 0.04035356
10 day(s) RMSE                     : 0.06103325
10 day(s) R2                       : -0.77667028
10 day(s) Pearson r                : 0.01043649
10 day(s) QLIKE                    : 9.70225226
full horizon MAE                   : 0.04035356
full horizon RMSE                  : 0.06103325
full horizon R2                    : -0.77667028
full horizon Pearson r             : 0.01043649
full horizon QLIKE                 : 9.70225226

--- Task 3 ---
1 day(s) MAE                       : 0.60274216
1 day(s) RMSE                      : 0.62241384
1 day(s) R2                        : -2.70234460
1 day(s) Pearson r                 : 0.02240481
1 day(s) QLIKE                     : 3.46570356
3 day(s) MAE                       : 0.73301975
3 day(s) RMSE                      : 0.78952214
3 day(s) R2                        : -4.95848485
3 day(s) Pearson r                 : 0.00034463
3 day(s) QLIKE                     : 6.37132781
5 day(s) MAE                       : 0.75930379
5 day(s) RMSE                      : 0.81916049
5 day(s) R2                        : -5.41517465
5 day(s) Pearson r                 : 0.00027992
5 day(s) QLIKE                     : 6.16621363
10 day(s) MAE                      : 0.77985219
10 day(s) RMSE                     : 0.84115812
10 day(s) R2                       : -5.80233819
10 day(s) Pearson r                : -0.00066100
10 day(s) QLIKE                    : 5.43409639
full horizon MAE                   : 0.77985219
full horizon RMSE                  : 0.84115812
full horizon R2                    : -5.80233819
full horizon Pearson r             : -0.00066100
full horizon QLIKE                 : 5.43409639

--- Task 4 ---
1 day(s) MAE                       : 0.12328881
1 day(s) RMSE                      : 0.25750286
1 day(s) R2                        : -0.29437605
1 day(s) Pearson r                 : 0.03309716
1 day(s) QLIKE                     : 1.87773506
3 day(s) MAE                       : 0.12409543
3 day(s) RMSE                      : 0.25795268
3 day(s) R2                        : -0.29904813
3 day(s) Pearson r                 : 0.00440123
3 day(s) QLIKE                     : 1.91877380
5 day(s) MAE                       : 0.12427902
5 day(s) RMSE                      : 0.25805564
5 day(s) R2                        : -0.30022181
5 day(s) Pearson r                 : -0.00417565
5 day(s) QLIKE                     : 1.91038814
10 day(s) MAE                      : 0.12453425
10 day(s) RMSE                     : 0.25753020
10 day(s) R2                       : -0.30417104
10 day(s) Pearson r                : 0.00116767
10 day(s) QLIKE                    : 2.52776731
full horizon MAE                   : 0.12453425
full horizon RMSE                  : 0.25753020
full horizon R2                    : -0.30417104
full horizon Pearson r             : 0.00116767
full horizon QLIKE                 : 2.52776731

--- Task 5 ---
1 day(s) MAE                       : 0.03993922
1 day(s) RMSE                      : 0.05999693
1 day(s) R2                        : -0.79388908
1 day(s) Pearson r                 : 0.19143643
1 day(s) QLIKE                     : 5.82919191
3 day(s) MAE                       : 0.03997815
3 day(s) RMSE                      : 0.06006432
3 day(s) R2                        : -0.79030362
3 day(s) Pearson r                 : 0.00013201
3 day(s) QLIKE                     : 6.24061190
5 day(s) MAE                       : 0.04010202
5 day(s) RMSE                      : 0.06024137
5 day(s) R2                        : -0.79276223
5 day(s) Pearson r                 : -0.00134503
5 day(s) QLIKE                     : 14.73205392
10 day(s) MAE                      : 0.04035035
10 day(s) RMSE                     : 0.06061240
10 day(s) R2                       : -0.79439726
10 day(s) Pearson r                : -0.00294715
10 day(s) QLIKE                    : 12.46632148
full horizon MAE                   : 0.04035035
full horizon RMSE                  : 0.06061240
full horizon R2                    : -0.79439726
full horizon Pearson r             : -0.00294715
full horizon QLIKE                 : 12.46632148

--- Task 6 ---
1 day(s) MAE                       : 0.44225776
1 day(s) RMSE                      : 0.58547586
1 day(s) R2                        : -0.07231140
1 day(s) Pearson r                 : -0.12081817
1 day(s) QLIKE                     : 0.01176597
3 day(s) MAE                       : 0.44974378
3 day(s) RMSE                      : 0.60362830
3 day(s) R2                        : -0.13768538
3 day(s) Pearson r                 : -0.08419066
3 day(s) QLIKE                     : 0.01178449
5 day(s) MAE                       : 0.45056106
5 day(s) RMSE                      : 0.60224851
5 day(s) R2                        : -0.13061682
5 day(s) Pearson r                 : -0.10136143
5 day(s) QLIKE                     : 0.01196118
10 day(s) MAE                      : 0.47998267
10 day(s) RMSE                     : 0.61731050
10 day(s) R2                       : -0.18028278
10 day(s) Pearson r                : -0.06516413
10 day(s) QLIKE                    : 0.01337909
full horizon MAE                   : 0.47998267
full horizon RMSE                  : 0.61731050
full horizon R2                    : -0.18028278
full horizon Pearson r             : -0.06516413
full horizon QLIKE                 : 0.01337909

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.272388, max=0.396835

=== GOLD | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 5534
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.254162049886622
  Min value:  -5.257736685381158
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7933331106383543
  Min value:  -5.155382782210931
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5432591469134986
  Min value:  -5.155382782210931
Epoch 001 | phase=1 | train_loss=4.8936 | val_main=0.489308
Epoch 002 | phase=1 | train_loss=4.5083 | val_main=0.489570
Epoch 003 | phase=1 | train_loss=4.2700 | val_main=0.489478
Epoch 004 | phase=1 | train_loss=4.1634 | val_main=0.489465
Epoch 005 | phase=1 | train_loss=4.0329 | val_main=0.489449
Epoch 006 | phase=1 | train_loss=3.9218 | val_main=0.489454
Epoch 007 | phase=1 | train_loss=3.8505 | val_main=0.489470
Epoch 008 | phase=1 | train_loss=3.8028 | val_main=0.489452
Epoch 009 | phase=1 | train_loss=3.8042 | val_main=0.489441
Epoch 010 | phase=1 | train_loss=3.7046 | val_main=0.489443
Epoch 011 | phase=1 | train_loss=3.7317 | val_main=0.489412
Epoch 012 | phase=1 | train_loss=3.6564 | val_main=0.489369
Epoch 013 | phase=1 | train_loss=3.5545 | val_main=0.489291
Epoch 014 | phase=1 | train_loss=3.5000 | val_main=0.489295
Epoch 015 | phase=1 | train_loss=3.4899 | val_main=0.489278
Epoch 016 | phase=0 | train_loss=4.3174 | val_main=0.478563
Epoch 017 | phase=0 | train_loss=4.2483 | val_main=0.472755
Epoch 018 | phase=0 | train_loss=4.1841 | val_main=0.487989
Epoch 019 | phase=0 | train_loss=3.9586 | val_main=0.495664
Epoch 020 | phase=0 | train_loss=3.8126 | val_main=0.456354
Epoch 021 | phase=0 | train_loss=3.6385 | val_main=0.446043
Epoch 022 | phase=0 | train_loss=3.4627 | val_main=0.426894
Epoch 023 | phase=0 | train_loss=3.3176 | val_main=0.405713
Epoch 024 | phase=0 | train_loss=3.1557 | val_main=0.388327
Epoch 025 | phase=0 | train_loss=3.3220 | val_main=0.380061
Epoch 026 | phase=0 | train_loss=3.0749 | val_main=0.359082
Epoch 027 | phase=0 | train_loss=3.0026 | val_main=0.346406
Epoch 028 | phase=0 | train_loss=2.9814 | val_main=0.331655
Epoch 029 | phase=0 | train_loss=2.8759 | val_main=0.325270
Epoch 030 | phase=0 | train_loss=2.8120 | val_main=0.310593
Epoch 031 | phase=2 | train_loss=0.2524 | val_main=0.304199
Epoch 032 | phase=2 | train_loss=0.2378 | val_main=0.297359
Epoch 033 | phase=2 | train_loss=0.2276 | val_main=0.293016
Epoch 034 | phase=2 | train_loss=0.2200 | val_main=0.287833
Epoch 035 | phase=2 | train_loss=0.2115 | val_main=0.292914
Epoch 036 | phase=2 | train_loss=0.2042 | val_main=0.285422
Epoch 037 | phase=2 | train_loss=0.1992 | val_main=0.293194
Epoch 038 | phase=2 | train_loss=0.1949 | val_main=0.286565
Epoch 039 | phase=2 | train_loss=0.1882 | val_main=0.285057
Epoch 040 | phase=2 | train_loss=0.1847 | val_main=0.292776
Epoch 041 | phase=2 | train_loss=0.1796 | val_main=0.290013
Epoch 042 | phase=2 | train_loss=0.1772 | val_main=0.291189
Epoch 043 | phase=2 | train_loss=0.1750 | val_main=0.298379
Epoch 044 | phase=2 | train_loss=0.1733 | val_main=0.294032
Epoch 045 | phase=2 | train_loss=0.1673 | val_main=0.295401
Epoch 046 | phase=2 | train_loss=0.1653 | val_main=0.299802
Epoch 047 | phase=2 | train_loss=0.1614 | val_main=0.301543
Epoch 048 | phase=2 | train_loss=0.1605 | val_main=0.299931
Epoch 049 | phase=2 | train_loss=0.1590 | val_main=0.302226
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.29729663
1 day(s) RMSE                      : 0.56791720
1 day(s) R2                        : 0.24894876
1 day(s) Pearson r                 : 0.52812074
1 day(s) QLIKE                     : 0.42387564
3 day(s) MAE                       : 0.30936414
3 day(s) RMSE                      : 0.58992890
3 day(s) R2                        : 0.19027778
3 day(s) Pearson r                 : 0.48381351
3 day(s) QLIKE                     : 0.41941654
5 day(s) MAE                       : 0.31299261
5 day(s) RMSE                      : 0.59163713
5 day(s) R2                        : 0.18789865
5 day(s) Pearson r                 : 0.47603819
5 day(s) QLIKE                     : 0.44993289
10 day(s) MAE                      : 0.32408643
10 day(s) RMSE                     : 0.59401634
10 day(s) R2                       : 0.19048556
10 day(s) Pearson r                : 0.47967996
10 day(s) QLIKE                    : 0.48696600
20 day(s) MAE                      : 0.34681460
20 day(s) RMSE                     : 0.63178049
20 day(s) R2                       : 0.09780080
20 day(s) Pearson r                : 0.40129759
20 day(s) QLIKE                    : 0.56078898
full horizon MAE                   : 0.34681460
full horizon RMSE                  : 0.63178049
full horizon R2                    : 0.09780080
full horizon Pearson r             : 0.40129759
full horizon QLIKE                 : 0.56078898

--- Task 2 ---
1 day(s) MAE                       : 0.03788411
1 day(s) RMSE                      : 0.05709707
1 day(s) R2                        : -0.54964898
1 day(s) Pearson r                 : 0.08087111
1 day(s) QLIKE                     : 16.96825879
3 day(s) MAE                       : 0.03807888
3 day(s) RMSE                      : 0.05730907
3 day(s) R2                        : -0.56217118
3 day(s) Pearson r                 : 0.06273781
3 day(s) QLIKE                     : 16.97195979
5 day(s) MAE                       : 0.03802897
5 day(s) RMSE                      : 0.05733861
5 day(s) R2                        : -0.56477136
5 day(s) Pearson r                 : 0.05918947
5 day(s) QLIKE                     : 17.03609578
10 day(s) MAE                      : 0.03801863
10 day(s) RMSE                     : 0.05739204
10 day(s) R2                       : -0.57100353
10 day(s) Pearson r                : 0.05423128
10 day(s) QLIKE                    : 17.06890578
20 day(s) MAE                      : 0.03811015
20 day(s) RMSE                     : 0.05771618
20 day(s) R2                       : -0.59738993
20 day(s) Pearson r                : 0.04204152
20 day(s) QLIKE                    : 16.94839515
full horizon MAE                   : 0.03811015
full horizon RMSE                  : 0.05771618
full horizon R2                    : -0.59738993
full horizon Pearson r             : 0.04204152
full horizon QLIKE                 : 16.94839515

--- Task 3 ---
1 day(s) MAE                       : 0.31759312
1 day(s) RMSE                      : 0.46096026
1 day(s) R2                        : -1.03069792
1 day(s) Pearson r                 : -0.25387275
1 day(s) QLIKE                     : 8.40762181
3 day(s) MAE                       : 0.31070750
3 day(s) RMSE                      : 0.45751041
3 day(s) R2                        : -1.00082529
3 day(s) Pearson r                 : -0.24431160
3 day(s) QLIKE                     : 8.42222126
5 day(s) MAE                       : 0.30876127
5 day(s) RMSE                      : 0.45739665
5 day(s) R2                        : -1.00012178
5 day(s) Pearson r                 : -0.23973133
5 day(s) QLIKE                     : 8.30785568
10 day(s) MAE                      : 0.30860599
10 day(s) RMSE                     : 0.45411696
10 day(s) R2                       : -0.98261497
10 day(s) Pearson r                : -0.23547205
10 day(s) QLIKE                    : 8.21486230
20 day(s) MAE                      : 0.30142216
20 day(s) RMSE                     : 0.45121772
20 day(s) R2                       : -1.00151273
20 day(s) Pearson r                : -0.22723764
20 day(s) QLIKE                    : 8.16816411
full horizon MAE                   : 0.30142216
full horizon RMSE                  : 0.45121772
full horizon R2                    : -1.00151273
full horizon Pearson r             : -0.22723764
full horizon QLIKE                 : 8.16816411

--- Task 4 ---
1 day(s) MAE                       : 0.12370900
1 day(s) RMSE                      : 0.25701673
1 day(s) R2                        : -0.28949345
1 day(s) Pearson r                 : -0.12937874
1 day(s) QLIKE                     : 8.39590494
3 day(s) MAE                       : 0.12376284
3 day(s) RMSE                      : 0.25699978
3 day(s) R2                        : -0.28946828
3 day(s) Pearson r                 : -0.12936346
3 day(s) QLIKE                     : 8.24286418
5 day(s) MAE                       : 0.12365344
5 day(s) RMSE                      : 0.25694019
5 day(s) R2                        : -0.28900560
5 day(s) Pearson r                 : -0.12813769
5 day(s) QLIKE                     : 8.21962200
10 day(s) MAE                      : 0.12289289
10 day(s) RMSE                     : 0.25592028
10 day(s) R2                       : -0.28791632
10 day(s) Pearson r                : -0.12774310
10 day(s) QLIKE                    : 8.10228091
20 day(s) MAE                      : 0.12028802
20 day(s) RMSE                     : 0.25198707
20 day(s) R2                       : -0.28604155
20 day(s) Pearson r                : -0.13674599
20 day(s) QLIKE                    : 7.75816848
full horizon MAE                   : 0.12028802
full horizon RMSE                  : 0.25198707
full horizon R2                    : -0.28604155
full horizon Pearson r             : -0.13674599
full horizon QLIKE                 : 7.75816848

--- Task 5 ---
1 day(s) MAE                       : 0.03977641
1 day(s) RMSE                      : 0.05893167
1 day(s) R2                        : -0.73075295
1 day(s) Pearson r                 : 0.05609695
1 day(s) QLIKE                     : 16.68690647
3 day(s) MAE                       : 0.04024153
3 day(s) RMSE                      : 0.05987984
3 day(s) R2                        : -0.77932313
3 day(s) Pearson r                 : 0.02929184
3 day(s) QLIKE                     : 16.80632118
5 day(s) MAE                       : 0.04035758
5 day(s) RMSE                      : 0.06015892
5 day(s) R2                        : -0.78785839
5 day(s) Pearson r                 : 0.03481701
5 day(s) QLIKE                     : 16.86478300
10 day(s) MAE                      : 0.04042123
10 day(s) RMSE                     : 0.06028893
10 day(s) R2                       : -0.77529609
10 day(s) Pearson r                : 0.05814383
10 day(s) QLIKE                    : 16.97168783
20 day(s) MAE                      : 0.04067649
20 day(s) RMSE                     : 0.06072623
20 day(s) R2                       : -0.76205558
20 day(s) Pearson r                : 0.05985624
20 day(s) QLIKE                    : 17.09257053
full horizon MAE                   : 0.04067649
full horizon RMSE                  : 0.06072623
full horizon R2                    : -0.76205558
full horizon Pearson r             : 0.05985624
full horizon QLIKE                 : 17.09257053

--- Task 6 ---
1 day(s) MAE                       : 0.92604995
1 day(s) RMSE                      : 1.95676490
1 day(s) R2                        : -10.97788754
1 day(s) Pearson r                 : -0.06112892
1 day(s) QLIKE                     : 0.04170543
3 day(s) MAE                       : 0.92204898
3 day(s) RMSE                      : 1.96903186
3 day(s) R2                        : -11.10566028
3 day(s) Pearson r                 : -0.06467843
3 day(s) QLIKE                     : 0.04147765
5 day(s) MAE                       : 0.92420110
5 day(s) RMSE                      : 1.99093458
5 day(s) R2                        : -11.35600167
5 day(s) Pearson r                 : -0.06746098
5 day(s) QLIKE                     : 0.04164399
10 day(s) MAE                      : 0.92912206
10 day(s) RMSE                     : 2.05030717
10 day(s) R2                       : -12.02016855
10 day(s) Pearson r                : -0.06805644
10 day(s) QLIKE                    : 0.04198280
20 day(s) MAE                      : 0.93656564
20 day(s) RMSE                     : 2.14938283
20 day(s) R2                       : -12.91877110
20 day(s) Pearson r                : -0.06953127
20 day(s) QLIKE                    : 0.04257040
full horizon MAE                   : 0.93656564
full horizon RMSE                  : 2.14938283
full horizon R2                    : -12.91877110
full horizon Pearson r             : -0.06953127
full horizon QLIKE                 : 0.04257040

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_H20.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.0122381, max=5.42892

=== GOLD | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 5534
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (3984, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  17.63876776611196
  Min value:  -0.5790835075307484
Checking X_time_train_core:
Shape: (3984, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.678060596158252
Checking y_train_core (log_mse scaled):
Shape: (3984, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.254162049886622
  Min value:  -5.257736685381158
Checking X_price_val:
Shape: (443, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  11.324782722398346
  Min value:  -0.5755730380699846
Checking X_time_val:
Shape: (443, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_val (log_mse scaled):
Shape: (443, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.7933331106383543
  Min value:  -5.155382782210931
Checking X_price_test:
Shape: (1107, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  6.413509896698947
  Min value:  -0.5768892216495461
Checking X_time_test:
Shape: (1107, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.869694456765018
  Min value:  -1.4237179058716
Checking y_test (log_mse scaled):
Shape: (1107, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  1.5432591469134986
  Min value:  -5.155382782210931
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 195s 458ms/step - loss: 0.5464 - val_loss: 0.5462 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 63s 503ms/step - loss: 0.3717 - val_loss: 0.5982 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 62s 499ms/step - loss: 0.3572 - val_loss: 0.8529 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 59s 470ms/step - loss: 0.3443 - val_loss: 0.5765 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 60s 477ms/step - loss: 0.3287 - val_loss: 0.5163 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 63s 504ms/step - loss: 0.3148 - val_loss: 0.5989 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 64s 512ms/step - loss: 0.3023 - val_loss: 0.5001 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 61s 488ms/step - loss: 0.2947 - val_loss: 0.5861 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 61s 491ms/step - loss: 0.2837 - val_loss: 0.5024 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 66s 524ms/step - loss: 0.2707 - val_loss: 1.0611 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=11 tf_ratio=0.474 -> TF=ON
Epoch 11/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 61s 489ms/step - loss: 0.2646 - val_loss: 0.5425 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=12 tf_ratio=0.421 -> TF=ON
Epoch 12/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 0s 472ms/step - loss: 0.2530
Epoch 12: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
125/125 ━━━━━━━━━━━━━━━━━━━━ 61s 486ms/step - loss: 0.2575 - val_loss: 0.5422 - learning_rate: 5.0000e-04
Epoch 12: early stopping
Restoring model weights from the end of the best epoch: 7.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.40302157
1 day(s) RMSE                      : 0.69898184
1 day(s) R2                        : -0.13770927
1 day(s) Pearson r                 : 0.43282981
1 day(s) QLIKE                     : 0.53211307
3 day(s) MAE                       : 0.40656036
3 day(s) RMSE                      : 0.70161632
3 day(s) R2                        : -0.14534434
3 day(s) Pearson r                 : 0.07533105
3 day(s) QLIKE                     : 0.53690361
5 day(s) MAE                       : 0.40477713
5 day(s) RMSE                      : 0.70034480
5 day(s) R2                        : -0.13795005
5 day(s) Pearson r                 : 0.06461583
5 day(s) QLIKE                     : 0.53718241
10 day(s) MAE                      : 0.41553954
10 day(s) RMSE                     : 0.69186777
10 day(s) R2                       : -0.09818125
10 day(s) Pearson r                : 0.00143268
10 day(s) QLIKE                    : 0.57002434
20 day(s) MAE                      : 0.42617477
20 day(s) RMSE                     : 0.69087198
20 day(s) R2                       : -0.07886021
20 day(s) Pearson r                : 0.00828061
20 day(s) QLIKE                    : 0.58142114
full horizon MAE                   : 0.42617477
full horizon RMSE                  : 0.69087198
full horizon R2                    : -0.07886021
full horizon Pearson r             : 0.00828061
full horizon QLIKE                 : 0.58142114

--- Task 2 ---
1 day(s) MAE                       : 0.04022105
1 day(s) RMSE                      : 0.06100197
1 day(s) R2                        : -0.76885995
1 day(s) Pearson r                 : 0.10445735
1 day(s) QLIKE                     : 7.86752618
3 day(s) MAE                       : 0.04026904
3 day(s) RMSE                      : 0.06102399
3 day(s) R2                        : -0.77126350
3 day(s) Pearson r                 : 0.01935441
3 day(s) QLIKE                     : 13.01171333
5 day(s) MAE                       : 0.04031677
5 day(s) RMSE                      : 0.06104488
5 day(s) R2                        : -0.77359762
5 day(s) Pearson r                 : 0.01332185
5 day(s) QLIKE                     : 10.96544809
10 day(s) MAE                      : 0.04035343
10 day(s) RMSE                     : 0.06103299
10 day(s) R2                       : -0.77665520
10 day(s) Pearson r                : 0.00868213
10 day(s) QLIKE                    : 9.48994061
20 day(s) MAE                      : 0.04026935
20 day(s) RMSE                     : 0.06088501
20 day(s) R2                       : -0.77760988
20 day(s) Pearson r                : 0.00641044
20 day(s) QLIKE                    : 8.69438089
full horizon MAE                   : 0.04026935
full horizon RMSE                  : 0.06088501
full horizon R2                    : -0.77760988
full horizon Pearson r             : 0.00641044
full horizon QLIKE                 : 8.69438089

--- Task 3 ---
1 day(s) MAE                       : 0.56906231
1 day(s) RMSE                      : 0.58352451
1 day(s) R2                        : -2.25414247
1 day(s) Pearson r                 : -0.02915053
1 day(s) QLIKE                     : 3.46604667
3 day(s) MAE                       : 0.72214881
3 day(s) RMSE                      : 0.78005114
3 day(s) R2                        : -4.81638789
3 day(s) Pearson r                 : -0.00023292
3 day(s) QLIKE                     : 9.36959796
5 day(s) MAE                       : 0.75278398
5 day(s) RMSE                      : 0.81370180
5 day(s) R2                        : -5.32996108
5 day(s) Pearson r                 : -0.00012834
5 day(s) QLIKE                     : 8.14960635
10 day(s) MAE                      : 0.77656840
10 day(s) RMSE                     : 0.83847383
10 day(s) R2                       : -5.75899250
10 day(s) Pearson r                : -0.00093015
10 day(s) QLIKE                    : 7.65167515
20 day(s) MAE                      : 0.79133054
20 day(s) RMSE                     : 0.85212259
20 day(s) R2                       : -6.13822471
20 day(s) Pearson r                : -0.00299377
20 day(s) QLIKE                    : 6.76924796
full horizon MAE                   : 0.79133054
full horizon RMSE                  : 0.85212259
full horizon R2                    : -6.13822471
full horizon Pearson r             : -0.00299377
full horizon QLIKE                 : 6.76924796

--- Task 4 ---
1 day(s) MAE                       : 0.12226108
1 day(s) RMSE                      : 0.25688480
1 day(s) R2                        : -0.28816994
1 day(s) Pearson r                 : -0.03961900
1 day(s) QLIKE                     : 1.87859748
3 day(s) MAE                       : 0.12345032
3 day(s) RMSE                      : 0.25757257
3 day(s) R2                        : -0.29522254
3 day(s) Pearson r                 : -0.00003581
3 day(s) QLIKE                     : 1.93394145
5 day(s) MAE                       : 0.12404239
5 day(s) RMSE                      : 0.25790370
5 day(s) R2                        : -0.29869109
5 day(s) Pearson r                 : -0.00133074
5 day(s) QLIKE                     : 1.95527426
10 day(s) MAE                      : 0.12410952
10 day(s) RMSE                     : 0.25729946
10 day(s) R2                       : -0.30183511
10 day(s) Pearson r                : -0.00591607
10 day(s) QLIKE                    : 2.00974190
20 day(s) MAE                      : 0.12166277
20 day(s) RMSE                     : 0.25325290
20 day(s) R2                       : -0.29899460
20 day(s) Pearson r                : -0.01046430
20 day(s) QLIKE                    : 1.96447427
full horizon MAE                   : 0.12166277
full horizon RMSE                  : 0.25325290
full horizon R2                    : -0.29899460
full horizon Pearson r             : -0.01046430
full horizon QLIKE                 : 1.96447427

--- Task 5 ---
1 day(s) MAE                       : 0.03985587
1 day(s) RMSE                      : 0.05989335
1 day(s) R2                        : -0.78770086
1 day(s) Pearson r                 : 0.19599300
1 day(s) QLIKE                     : 5.83384583
3 day(s) MAE                       : 0.04001949
3 day(s) RMSE                      : 0.06011638
3 day(s) R2                        : -0.79340873
3 day(s) Pearson r                 : 0.00144920
3 day(s) QLIKE                     : 9.42073126
5 day(s) MAE                       : 0.04012704
5 day(s) RMSE                      : 0.06027281
5 day(s) R2                        : -0.79463420
5 day(s) Pearson r                 : -0.00005896
5 day(s) QLIKE                     : 13.97265199
10 day(s) MAE                      : 0.04036285
10 day(s) RMSE                     : 0.06062803
10 day(s) R2                       : -0.79532266
10 day(s) Pearson r                : -0.00176169
10 day(s) QLIKE                    : 10.47031878
20 day(s) MAE                      : 0.04081148
20 day(s) RMSE                     : 0.06130255
20 day(s) R2                       : -0.79566000
20 day(s) Pearson r                : -0.00350911
20 day(s) QLIKE                    : 8.45564084
full horizon MAE                   : 0.04081148
full horizon RMSE                  : 0.06130255
full horizon R2                    : -0.79566000
full horizon Pearson r             : -0.00350911
full horizon QLIKE                 : 8.45564084

--- Task 6 ---
1 day(s) MAE                       : 0.47788595
1 day(s) RMSE                      : 0.57921693
1 day(s) R2                        : -0.04950723
1 day(s) Pearson r                 : -0.11242209
1 day(s) QLIKE                     : 0.01132132
3 day(s) MAE                       : 0.47485629
3 day(s) RMSE                      : 0.57824143
3 day(s) R2                        : -0.04400219
3 day(s) Pearson r                 : -0.04973741
3 day(s) QLIKE                     : 0.01137736
5 day(s) MAE                       : 0.49140690
5 day(s) RMSE                      : 0.58987402
5 day(s) R2                        : -0.08463227
5 day(s) Pearson r                 : -0.00326826
5 day(s) QLIKE                     : 0.01142511
10 day(s) MAE                      : 0.61359305
10 day(s) RMSE                     : 0.72670430
10 day(s) R2                       : -0.63566442
10 day(s) Pearson r                : 0.01150166
10 day(s) QLIKE                    : 0.01319750
20 day(s) MAE                      : 0.69386184
20 day(s) RMSE                     : 0.80717239
20 day(s) R2                       : -0.96293506
20 day(s) Pearson r                : 0.01112487
20 day(s) QLIKE                    : 0.01331264
full horizon MAE                   : 0.69386184
full horizon RMSE                  : 0.80717239
full horizon R2                    : -0.96293506
full horizon Pearson r             : 0.01112487
full horizon QLIKE                 : 0.01331264

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/GOLD/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.00380168, max=7.43162
Saved y_pred min=0.246585, max=0.993158

=== SP500 | H=1 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9996639755036183
  Min value:  -28.079833393572628
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.544997163695437
  Min value:  -28.079833393572628
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3795163284544945
  Min value:  -28.079833393572628
Epoch 001 | phase=1 | train_loss=4.9654 | val_main=1.238538
Epoch 002 | phase=1 | train_loss=4.8117 | val_main=1.237960
Epoch 003 | phase=1 | train_loss=4.7495 | val_main=1.237837
Epoch 004 | phase=1 | train_loss=4.7174 | val_main=1.237854
Epoch 005 | phase=1 | train_loss=4.6766 | val_main=1.237792
Epoch 006 | phase=1 | train_loss=4.6357 | val_main=1.237554
Epoch 007 | phase=1 | train_loss=4.6698 | val_main=1.237261
Epoch 008 | phase=1 | train_loss=4.6188 | val_main=1.237086
Epoch 009 | phase=1 | train_loss=4.5474 | val_main=1.236509
Epoch 010 | phase=1 | train_loss=4.5369 | val_main=1.236712
Epoch 011 | phase=1 | train_loss=4.3913 | val_main=1.237028
Epoch 012 | phase=1 | train_loss=4.2929 | val_main=1.237298
Epoch 013 | phase=1 | train_loss=4.1470 | val_main=1.237204
Epoch 014 | phase=1 | train_loss=4.0238 | val_main=1.236947
Epoch 015 | phase=1 | train_loss=3.8078 | val_main=1.236921
Epoch 016 | phase=0 | train_loss=4.7641 | val_main=1.050058
Epoch 017 | phase=0 | train_loss=4.3869 | val_main=0.757068
Epoch 018 | phase=0 | train_loss=4.3681 | val_main=0.654863
Epoch 019 | phase=0 | train_loss=3.9578 | val_main=0.553593
Epoch 020 | phase=0 | train_loss=3.6922 | val_main=0.499560
Epoch 021 | phase=0 | train_loss=3.6527 | val_main=0.489510
Epoch 022 | phase=0 | train_loss=3.6307 | val_main=0.489011
Epoch 023 | phase=0 | train_loss=3.5573 | val_main=0.484937
Epoch 024 | phase=0 | train_loss=3.4973 | val_main=0.499002
Epoch 025 | phase=0 | train_loss=3.4025 | val_main=0.502311
Epoch 026 | phase=0 | train_loss=3.3033 | val_main=0.513120
Epoch 027 | phase=0 | train_loss=3.2074 | val_main=0.502723
Epoch 028 | phase=0 | train_loss=3.2644 | val_main=0.501087
Epoch 029 | phase=0 | train_loss=3.2862 | val_main=0.508420
Epoch 030 | phase=0 | train_loss=3.1255 | val_main=0.525873
Epoch 031 | phase=2 | train_loss=0.3966 | val_main=0.519985
Epoch 032 | phase=2 | train_loss=0.3883 | val_main=0.528568
Epoch 033 | phase=2 | train_loss=0.3845 | val_main=0.523267
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 6
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.48477420
1 day(s) RMSE                      : 2.24711640
1 day(s) R2                        : 0.37025505
1 day(s) Pearson r                 : 0.65268579
1 day(s) QLIKE                     : 0.44364619
full horizon MAE                   : 0.48477420
full horizon RMSE                  : 2.24711640
full horizon R2                    : 0.37025505
full horizon Pearson r             : 0.65268579
full horizon QLIKE                 : 0.44364619

--- Task 2 ---
1 day(s) MAE                       : 0.07727403
1 day(s) RMSE                      : 0.08826825
1 day(s) R2                        : -1.63901666
1 day(s) Pearson r                 : 0.23614900
1 day(s) QLIKE                     : 7.25172692
full horizon MAE                   : 0.07727403
full horizon RMSE                  : 0.08826825
full horizon R2                    : -1.63901666
full horizon Pearson r             : 0.23614900
full horizon QLIKE                 : 7.25172692

--- Task 3 ---
1 day(s) MAE                       : 0.31877575
1 day(s) RMSE                      : 0.43687147
1 day(s) R2                        : -46.46592831
1 day(s) Pearson r                 : -0.01832788
1 day(s) QLIKE                     : 0.77568128
full horizon MAE                   : 0.31877575
full horizon RMSE                  : 0.43687147
full horizon R2                    : -46.46592831
full horizon Pearson r             : -0.01832788
full horizon QLIKE                 : 0.77568128

--- Task 4 ---
1 day(s) MAE                       : 0.03229071
1 day(s) RMSE                      : 0.03899003
1 day(s) R2                        : -1.90176847
1 day(s) Pearson r                 : -0.12596547
1 day(s) QLIKE                     : 1.19703047
full horizon MAE                   : 0.03229071
full horizon RMSE                  : 0.03899003
full horizon R2                    : -1.90176847
full horizon Pearson r             : -0.12596547
full horizon QLIKE                 : 1.19703047

--- Task 5 ---
1 day(s) MAE                       : 0.02640609
1 day(s) RMSE                      : 0.03439154
1 day(s) R2                        : -0.22000324
1 day(s) Pearson r                 : 0.18096763
1 day(s) QLIKE                     : 0.42612345
full horizon MAE                   : 0.02640609
full horizon RMSE                  : 0.03439154
full horizon R2                    : -0.22000324
full horizon Pearson r             : 0.18096763
full horizon QLIKE                 : 0.42612345

--- Task 6 ---
1 day(s) MAE                       : 2.67525934
1 day(s) RMSE                      : 3.29786144
1 day(s) R2                        : -1.54937039
1 day(s) Pearson r                 : 0.06064200
1 day(s) QLIKE                     : 0.08644298
full horizon MAE                   : 2.67525934
full horizon RMSE                  : 3.29786144
full horizon R2                    : -1.54937039
full horizon Pearson r             : 0.06064200
full horizon QLIKE                 : 0.08644298

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_H1.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.0294916, max=15.0328

=== SP500 | H=1 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3466
Time steps for y: 1
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.9996639755036183
  Min value:  -28.079833393572628
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.544997163695437
  Min value:  -28.079833393572628
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 1, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3795163284544945
  Min value:  -28.079833393572628
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 176s 338ms/step - loss: 1.0036 - val_loss: 1.1506 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 15s 189ms/step - loss: 0.9641 - val_loss: 0.8365 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 16s 205ms/step - loss: 0.9574 - val_loss: 0.8473 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 15s 194ms/step - loss: 0.9445 - val_loss: 0.8151 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 15s 192ms/step - loss: 0.9466 - val_loss: 0.7790 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 15s 195ms/step - loss: 0.9428 - val_loss: 0.8176 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 16s 201ms/step - loss: 0.9382 - val_loss: 0.8526 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 17s 212ms/step - loss: 0.9381 - val_loss: 0.8040 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 18s 226ms/step - loss: 0.9324 - val_loss: 0.8013 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 0s 225ms/step - loss: 0.9979
Epoch 10: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
78/78 ━━━━━━━━━━━━━━━━━━━━ 18s 232ms/step - loss: 0.9331 - val_loss: 0.8049 - learning_rate: 5.0000e-04
Epoch 10: early stopping
Restoring model weights from the end of the best epoch: 5.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 1
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.58192507
1 day(s) RMSE                      : 2.71371725
1 day(s) R2                        : 0.08157701
1 day(s) Pearson r                 : 0.47143322
1 day(s) QLIKE                     : 0.61688294
full horizon MAE                   : 0.58192507
full horizon RMSE                  : 2.71371725
full horizon R2                    : 0.08157701
full horizon Pearson r             : 0.47143322
full horizon QLIKE                 : 0.61688294

--- Task 2 ---
1 day(s) MAE                       : 0.03507331
1 day(s) RMSE                      : 0.04995514
1 day(s) R2                        : 0.15473384
1 day(s) Pearson r                 : 0.54353732
1 day(s) QLIKE                     : 2.53169985
full horizon MAE                   : 0.03507331
full horizon RMSE                  : 0.04995514
full horizon R2                    : 0.15473384
full horizon Pearson r             : 0.54353732
full horizon QLIKE                 : 2.53169985

--- Task 3 ---
1 day(s) MAE                       : 0.38799865
1 day(s) RMSE                      : 0.40280362
1 day(s) R2                        : -39.35165332
1 day(s) Pearson r                 : -0.36921924
1 day(s) QLIKE                     : 0.01117270
full horizon MAE                   : 0.38799865
full horizon RMSE                  : 0.40280362
full horizon R2                    : -39.35165332
full horizon Pearson r             : -0.36921924
full horizon QLIKE                 : 0.01117270

--- Task 4 ---
1 day(s) MAE                       : 0.02304827
1 day(s) RMSE                      : 0.02633547
1 day(s) R2                        : -0.32384720
1 day(s) Pearson r                 : -0.38899153
1 day(s) QLIKE                     : 1.13479390
full horizon MAE                   : 0.02304827
full horizon RMSE                  : 0.02633547
full horizon R2                    : -0.32384720
full horizon Pearson r             : -0.38899153
full horizon QLIKE                 : 1.13479390

--- Task 5 ---
1 day(s) MAE                       : 0.02530964
1 day(s) RMSE                      : 0.03152965
1 day(s) R2                        : -0.02540674
1 day(s) Pearson r                 : -0.10536220
1 day(s) QLIKE                     : 0.43159889
full horizon MAE                   : 0.02530964
full horizon RMSE                  : 0.03152965
full horizon R2                    : -0.02540674
full horizon Pearson r             : -0.10536220
full horizon QLIKE                 : 0.43159889

--- Task 6 ---
1 day(s) MAE                       : 2.21325842
1 day(s) RMSE                      : 2.73766994
1 day(s) R2                        : -0.75683229
1 day(s) Pearson r                 : -0.34709459
1 day(s) QLIKE                     : 0.06320649
full horizon MAE                   : 2.21325842
full horizon RMSE                  : 2.73766994
full horizon R2                    : -0.75683229
full horizon Pearson r             : -0.34709459
full horizon QLIKE                 : 0.06320649

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_LSTM_H1.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.197013, max=2.03264

=== SP500 | H=5 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.008465261938893
  Min value:  -28.084553036741752
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5447500237882554
  Min value:  -28.084553036741752
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3784015875360596
  Min value:  -28.084553036741752
Epoch 001 | phase=1 | train_loss=5.0084 | val_main=1.233486
Epoch 002 | phase=1 | train_loss=4.8260 | val_main=1.232634
Epoch 003 | phase=1 | train_loss=4.7092 | val_main=1.232522
Epoch 004 | phase=1 | train_loss=4.6919 | val_main=1.232620
Epoch 005 | phase=1 | train_loss=4.6401 | val_main=1.232290
Epoch 006 | phase=1 | train_loss=4.6303 | val_main=1.232328
Epoch 007 | phase=1 | train_loss=4.7869 | val_main=1.232030
Epoch 008 | phase=1 | train_loss=4.5370 | val_main=1.232185
Epoch 009 | phase=1 | train_loss=4.5850 | val_main=1.232516
Epoch 010 | phase=1 | train_loss=4.4779 | val_main=1.232668
Epoch 011 | phase=1 | train_loss=4.4559 | val_main=1.232618
Epoch 012 | phase=1 | train_loss=4.2938 | val_main=1.232922
Epoch 013 | phase=1 | train_loss=4.1667 | val_main=1.232609
Epoch 014 | phase=1 | train_loss=4.0803 | val_main=1.232663
Epoch 015 | phase=1 | train_loss=3.9149 | val_main=1.232737
Epoch 016 | phase=0 | train_loss=4.7678 | val_main=1.150310
Epoch 017 | phase=0 | train_loss=4.4972 | val_main=0.811819
Epoch 018 | phase=0 | train_loss=4.4235 | val_main=0.792201
Epoch 019 | phase=0 | train_loss=4.2295 | val_main=0.757253
Epoch 020 | phase=0 | train_loss=4.2049 | val_main=0.756392
Epoch 021 | phase=0 | train_loss=4.0580 | val_main=0.773481
Epoch 022 | phase=0 | train_loss=4.0221 | val_main=0.759606
Epoch 023 | phase=0 | train_loss=4.0701 | val_main=0.747288
Epoch 024 | phase=0 | train_loss=3.9950 | val_main=0.754625
Epoch 025 | phase=0 | train_loss=3.8772 | val_main=0.751282
Epoch 026 | phase=0 | train_loss=3.8398 | val_main=0.767459
Epoch 027 | phase=0 | train_loss=3.6902 | val_main=0.770717
Epoch 028 | phase=0 | train_loss=3.7262 | val_main=0.773292
Epoch 029 | phase=0 | train_loss=3.8376 | val_main=0.773843
Epoch 030 | phase=0 | train_loss=3.8188 | val_main=0.767685
Epoch 031 | phase=2 | train_loss=0.6243 | val_main=0.778505
Epoch 032 | phase=2 | train_loss=0.6441 | val_main=0.780467
Epoch 033 | phase=2 | train_loss=0.6194 | val_main=0.793580
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 30
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.55973660
1 day(s) RMSE                      : 2.31164393
1 day(s) R2                        : 0.33356863
1 day(s) Pearson r                 : 0.58162315
1 day(s) QLIKE                     : 0.58671698
3 day(s) MAE                       : 0.60524988
3 day(s) RMSE                      : 2.58292200
3 day(s) R2                        : 0.16801567
3 day(s) Pearson r                 : 0.46160456
3 day(s) QLIKE                     : 0.61852362
5 day(s) MAE                       : 0.65075375
5 day(s) RMSE                      : 2.80371312
5 day(s) R2                        : 0.01975456
5 day(s) Pearson r                 : 0.34426963
5 day(s) QLIKE                     : 0.66975733
full horizon MAE                   : 0.65075375
full horizon RMSE                  : 2.80371312
full horizon R2                    : 0.01975456
full horizon Pearson r             : 0.34426963
full horizon QLIKE                 : 0.66975733

--- Task 2 ---
1 day(s) MAE                       : 0.07309016
1 day(s) RMSE                      : 0.09658281
1 day(s) R2                        : -2.15960504
1 day(s) Pearson r                 : 0.43712608
1 day(s) QLIKE                     : 7.34765287
3 day(s) MAE                       : 0.06737480
3 day(s) RMSE                      : 0.08394057
3 day(s) R2                        : -1.35449774
3 day(s) Pearson r                 : 0.39061380
3 day(s) QLIKE                     : 7.32992279
5 day(s) MAE                       : 0.06812769
5 day(s) RMSE                      : 0.08220082
5 day(s) R2                        : -1.22671924
5 day(s) Pearson r                 : 0.37134927
5 day(s) QLIKE                     : 7.40468125
full horizon MAE                   : 0.06812769
full horizon RMSE                  : 0.08220082
full horizon R2                    : -1.22671924
full horizon Pearson r             : 0.37134927
full horizon QLIKE                 : 7.40468125

--- Task 3 ---
1 day(s) MAE                       : 0.30920290
1 day(s) RMSE                      : 0.42250405
1 day(s) R2                        : -43.39523618
1 day(s) Pearson r                 : -0.03193439
1 day(s) QLIKE                     : 0.65939620
3 day(s) MAE                       : 0.31146694
3 day(s) RMSE                      : 0.42118327
3 day(s) R2                        : -42.82677786
3 day(s) Pearson r                 : -0.04145575
3 day(s) QLIKE                     : 0.63096659
5 day(s) MAE                       : 0.30963711
5 day(s) RMSE                      : 0.42039191
5 day(s) R2                        : -42.40595594
5 day(s) Pearson r                 : -0.03879923
5 day(s) QLIKE                     : 0.61863716
full horizon MAE                   : 0.30963711
full horizon RMSE                  : 0.42039191
full horizon R2                    : -42.40595594
full horizon Pearson r             : -0.03879923
full horizon QLIKE                 : 0.61863716

--- Task 4 ---
1 day(s) MAE                       : 0.03296584
1 day(s) RMSE                      : 0.04022714
1 day(s) R2                        : -2.08883055
1 day(s) Pearson r                 : -0.13481319
1 day(s) QLIKE                     : 1.19691988
3 day(s) MAE                       : 0.03286721
3 day(s) RMSE                      : 0.04013032
3 day(s) R2                        : -2.06133311
3 day(s) Pearson r                 : -0.12727319
3 day(s) QLIKE                     : 1.19564929
5 day(s) MAE                       : 0.03276573
5 day(s) RMSE                      : 0.03987454
5 day(s) R2                        : -2.01012181
5 day(s) Pearson r                 : -0.12340933
5 day(s) QLIKE                     : 1.19530503
full horizon MAE                   : 0.03276573
full horizon RMSE                  : 0.03987454
full horizon R2                    : -2.01012181
full horizon Pearson r             : -0.12340933
full horizon QLIKE                 : 1.19530503

--- Task 5 ---
1 day(s) MAE                       : 0.02690290
1 day(s) RMSE                      : 0.03447128
1 day(s) R2                        : -0.22566703
1 day(s) Pearson r                 : 0.12024437
1 day(s) QLIKE                     : 0.43668273
3 day(s) MAE                       : 0.02675343
3 day(s) RMSE                      : 0.03434439
3 day(s) R2                        : -0.22311717
3 day(s) Pearson r                 : 0.12410513
3 day(s) QLIKE                     : 0.43436728
5 day(s) MAE                       : 0.02651796
5 day(s) RMSE                      : 0.03401249
5 day(s) R2                        : -0.20631745
5 day(s) Pearson r                 : 0.13754611
5 day(s) QLIKE                     : 0.42616766
full horizon MAE                   : 0.02651796
full horizon RMSE                  : 0.03401249
full horizon R2                    : -0.20631745
full horizon Pearson r             : 0.13754611
full horizon QLIKE                 : 0.42616766

--- Task 6 ---
1 day(s) MAE                       : 2.68266943
1 day(s) RMSE                      : 3.32710080
1 day(s) R2                        : -1.59477703
1 day(s) Pearson r                 : 0.04528192
1 day(s) QLIKE                     : 0.08761525
3 day(s) MAE                       : 2.67464825
3 day(s) RMSE                      : 3.31876202
3 day(s) R2                        : -1.57502267
3 day(s) Pearson r                 : 0.05084459
3 day(s) QLIKE                     : 0.08708968
5 day(s) MAE                       : 2.66384731
5 day(s) RMSE                      : 3.30359605
5 day(s) R2                        : -1.54398642
5 day(s) Pearson r                 : 0.05614479
5 day(s) QLIKE                     : 0.08647860
full horizon MAE                   : 2.66384731
full horizon RMSE                  : 3.30359605
full horizon R2                    : -1.54398642
full horizon Pearson r             : 0.05614479
full horizon QLIKE                 : 0.08647860

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_H5.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.143876, max=30.4168

=== SP500 | H=5 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3466
Time steps for y: 5
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.008465261938893
  Min value:  -28.084553036741752
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5447500237882554
  Min value:  -28.084553036741752
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 5, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3784015875360596
  Min value:  -28.084553036741752
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 172s 366ms/step - loss: 0.8092 - val_loss: 1.1127 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 19s 240ms/step - loss: 0.6023 - val_loss: 1.3889 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 20s 250ms/step - loss: 0.5600 - val_loss: 0.8720 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 20s 252ms/step - loss: 0.5377 - val_loss: 1.1287 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 19s 245ms/step - loss: 0.5257 - val_loss: 1.2308 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 19s 248ms/step - loss: 0.5191 - val_loss: 1.2710 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 21s 264ms/step - loss: 0.5094 - val_loss: 1.1616 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 0s 297ms/step - loss: 0.5826
Epoch 8: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
78/78 ━━━━━━━━━━━━━━━━━━━━ 24s 306ms/step - loss: 0.5027 - val_loss: 1.0760 - learning_rate: 5.0000e-04
Epoch 8: early stopping
Restoring model weights from the end of the best epoch: 3.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 5
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.61838397
1 day(s) RMSE                      : 2.80015597
1 day(s) R2                        : 0.02213703
1 day(s) Pearson r                 : 0.34830508
1 day(s) QLIKE                     : 0.69574540
3 day(s) MAE                       : 0.63451976
3 day(s) RMSE                      : 2.77650504
3 day(s) R2                        : 0.03863237
3 day(s) Pearson r                 : 0.29721195
3 day(s) QLIKE                     : 0.71631177
5 day(s) MAE                       : 0.64236976
5 day(s) RMSE                      : 2.80491641
5 day(s) R2                        : 0.01891298
5 day(s) Pearson r                 : 0.23538775
5 day(s) QLIKE                     : 0.79835126
full horizon MAE                   : 0.64236976
full horizon RMSE                  : 2.80491641
full horizon R2                    : 0.01891298
full horizon Pearson r             : 0.23538775
full horizon QLIKE                 : 0.79835126

--- Task 2 ---
1 day(s) MAE                       : 0.03462909
1 day(s) RMSE                      : 0.05217154
1 day(s) R2                        : 0.07806490
1 day(s) Pearson r                 : 0.47988074
1 day(s) QLIKE                     : 2.52537106
3 day(s) MAE                       : 0.04271142
3 day(s) RMSE                      : 0.06083719
3 day(s) R2                        : -0.23678069
3 day(s) Pearson r                 : 0.40501661
3 day(s) QLIKE                     : 13.01419698
5 day(s) MAE                       : 0.04660316
5 day(s) RMSE                      : 0.06645819
5 day(s) R2                        : -0.45549311
5 day(s) Pearson r                 : 0.34224219
5 day(s) QLIKE                     : 13.59849269
full horizon MAE                   : 0.04660316
full horizon RMSE                  : 0.06645819
full horizon R2                    : -0.45549311
full horizon Pearson r             : 0.34224219
full horizon QLIKE                 : 13.59849269

--- Task 3 ---
1 day(s) MAE                       : 0.33265610
1 day(s) RMSE                      : 0.33800832
1 day(s) R2                        : -27.41380525
1 day(s) Pearson r                 : 0.39645476
1 day(s) QLIKE                     : 0.00218268
3 day(s) MAE                       : 0.24035282
3 day(s) RMSE                      : 0.25940490
3 day(s) R2                        : -15.62469461
3 day(s) Pearson r                 : 0.04353049
3 day(s) QLIKE                     : 0.00916784
5 day(s) MAE                       : 0.20006462
5 day(s) RMSE                      : 0.22404465
5 day(s) R2                        : -11.32848696
5 day(s) Pearson r                 : 0.01437848
5 day(s) QLIKE                     : 0.00995198
full horizon MAE                   : 0.20006462
full horizon RMSE                  : 0.22404465
full horizon R2                    : -11.32848696
full horizon Pearson r             : 0.01437848
full horizon QLIKE                 : 0.00995198

--- Task 4 ---
1 day(s) MAE                       : 0.02397391
1 day(s) RMSE                      : 0.02722318
1 day(s) R2                        : -0.41459960
1 day(s) Pearson r                 : -0.35253496
1 day(s) QLIKE                     : 1.12740169
3 day(s) MAE                       : 0.02451031
3 day(s) RMSE                      : 0.02779148
3 day(s) R2                        : -0.46821137
3 day(s) Pearson r                 : -0.32528868
3 day(s) QLIKE                     : 1.13022937
5 day(s) MAE                       : 0.02475864
5 day(s) RMSE                      : 0.02808444
5 day(s) R2                        : -0.49322126
5 day(s) Pearson r                 : -0.32459345
5 day(s) QLIKE                     : 1.13483404
full horizon MAE                   : 0.02475864
full horizon RMSE                  : 0.02808444
full horizon R2                    : -0.49322126
full horizon Pearson r             : -0.32459345
full horizon QLIKE                 : 1.13483404

--- Task 5 ---
1 day(s) MAE                       : 0.02495631
1 day(s) RMSE                      : 0.03139459
1 day(s) R2                        : -0.01664071
1 day(s) Pearson r                 : -0.04930959
1 day(s) QLIKE                     : 0.43046855
3 day(s) MAE                       : 0.02462624
3 day(s) RMSE                      : 0.03227479
3 day(s) R2                        : -0.08014818
3 day(s) Pearson r                 : -0.00681117
3 day(s) QLIKE                     : 0.43080153
5 day(s) MAE                       : 0.02475737
5 day(s) RMSE                      : 0.03260740
5 day(s) R2                        : -0.10870783
5 day(s) Pearson r                 : 0.01607667
5 day(s) QLIKE                     : 0.42497477
full horizon MAE                   : 0.02475737
full horizon RMSE                  : 0.03260740
full horizon R2                    : -0.10870783
full horizon Pearson r             : 0.01607667
full horizon QLIKE                 : 0.42497477

--- Task 6 ---
1 day(s) MAE                       : 2.26736001
1 day(s) RMSE                      : 2.80601853
1 day(s) R2                        : -0.84564937
1 day(s) Pearson r                 : -0.35878713
1 day(s) QLIKE                     : 0.06662895
3 day(s) MAE                       : 2.27352679
3 day(s) RMSE                      : 2.81497600
3 day(s) R2                        : -0.85258535
3 day(s) Pearson r                 : -0.36183429
3 day(s) QLIKE                     : 0.06849299
5 day(s) MAE                       : 2.22348084
5 day(s) RMSE                      : 2.75212596
5 day(s) R2                        : -0.76553986
5 day(s) Pearson r                 : -0.31884216
5 day(s) QLIKE                     : 0.06999864
full horizon MAE                   : 2.22348084
full horizon RMSE                  : 2.75212596
full horizon R2                    : -0.76553986
full horizon Pearson r             : -0.31884216
full horizon QLIKE                 : 0.06999864

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_LSTM_H5.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.206683, max=1.96678

=== SP500 | H=10 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.0210836022185426
  Min value:  -28.086926568760678
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5448233279041013
  Min value:  -28.086926568760678
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.378408171115352
  Min value:  -28.086926568760678
Epoch 001 | phase=1 | train_loss=4.9876 | val_main=1.237347
Epoch 002 | phase=1 | train_loss=4.8772 | val_main=1.240204
Epoch 003 | phase=1 | train_loss=4.6951 | val_main=1.240515
Epoch 004 | phase=1 | train_loss=4.6930 | val_main=1.239956
Epoch 005 | phase=1 | train_loss=4.6363 | val_main=1.240195
Epoch 006 | phase=1 | train_loss=4.6144 | val_main=1.239709
Epoch 007 | phase=1 | train_loss=4.6767 | val_main=1.239914
Epoch 008 | phase=1 | train_loss=4.5785 | val_main=1.239517
Epoch 009 | phase=1 | train_loss=4.5858 | val_main=1.239067
Epoch 010 | phase=1 | train_loss=4.5431 | val_main=1.239375
Epoch 011 | phase=1 | train_loss=4.4298 | val_main=1.238829
Epoch 012 | phase=1 | train_loss=4.3143 | val_main=1.239291
Epoch 013 | phase=1 | train_loss=4.2033 | val_main=1.239293
Epoch 014 | phase=1 | train_loss=4.1124 | val_main=1.239239
Epoch 015 | phase=1 | train_loss=3.9859 | val_main=1.239225
Epoch 016 | phase=0 | train_loss=4.8497 | val_main=1.177064
Epoch 017 | phase=0 | train_loss=4.6467 | val_main=0.846582
Epoch 018 | phase=0 | train_loss=4.5237 | val_main=0.813672
Epoch 019 | phase=0 | train_loss=4.3079 | val_main=0.792672
Epoch 020 | phase=0 | train_loss=4.2476 | val_main=0.813801
Epoch 021 | phase=0 | train_loss=4.2010 | val_main=0.820133
Epoch 022 | phase=0 | train_loss=4.1302 | val_main=0.794638
Epoch 023 | phase=0 | train_loss=4.1710 | val_main=0.781230
Epoch 024 | phase=0 | train_loss=4.2295 | val_main=0.791165
Epoch 025 | phase=0 | train_loss=3.9827 | val_main=0.790353
Epoch 026 | phase=0 | train_loss=3.9379 | val_main=0.812462
Epoch 027 | phase=0 | train_loss=3.8927 | val_main=0.796465
Epoch 028 | phase=0 | train_loss=3.8676 | val_main=0.816236
Epoch 029 | phase=0 | train_loss=3.8813 | val_main=0.794464
Epoch 030 | phase=0 | train_loss=3.8625 | val_main=0.787984
Epoch 031 | phase=2 | train_loss=0.6758 | val_main=0.806552
Epoch 032 | phase=2 | train_loss=0.6829 | val_main=0.803852
Epoch 033 | phase=2 | train_loss=0.6704 | val_main=0.823174
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 60
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.60514302
1 day(s) RMSE                      : 2.53691621
1 day(s) R2                        : 0.19735075
1 day(s) Pearson r                 : 0.60337156
1 day(s) QLIKE                     : 0.59027515
3 day(s) MAE                       : 0.64942608
3 day(s) RMSE                      : 2.78386890
3 day(s) R2                        : 0.03352612
3 day(s) Pearson r                 : 0.48035070
3 day(s) QLIKE                     : 0.62673358
5 day(s) MAE                       : 0.69461693
5 day(s) RMSE                      : 3.01937531
5 day(s) R2                        : -0.13684664
5 day(s) Pearson r                 : 0.35734688
5 day(s) QLIKE                     : 0.68329959
10 day(s) MAE                      : 0.72564175
10 day(s) RMSE                     : 3.10490138
10 day(s) R2                       : -0.20200008
10 day(s) Pearson r                : 0.23661691
10 day(s) QLIKE                    : 0.79233418
full horizon MAE                   : 0.72564175
full horizon RMSE                  : 3.10490138
full horizon R2                    : -0.20200008
full horizon Pearson r             : 0.23661691
full horizon QLIKE                 : 0.79233418

--- Task 2 ---
1 day(s) MAE                       : 0.06179473
1 day(s) RMSE                      : 0.09604238
1 day(s) R2                        : -2.12434467
1 day(s) Pearson r                 : 0.42827435
1 day(s) QLIKE                     : 7.51829087
3 day(s) MAE                       : 0.05987612
3 day(s) RMSE                      : 0.07996760
3 day(s) R2                        : -1.13689181
3 day(s) Pearson r                 : 0.39444335
3 day(s) QLIKE                     : 7.32785355
5 day(s) MAE                       : 0.06137555
5 day(s) RMSE                      : 0.08368315
5 day(s) R2                        : -1.30775185
5 day(s) Pearson r                 : 0.40933917
5 day(s) QLIKE                     : 7.41225689
10 day(s) MAE                      : 0.06214279
10 day(s) RMSE                     : 0.08728484
10 day(s) R2                       : -1.43066109
10 day(s) Pearson r                : 0.39490579
10 day(s) QLIKE                    : 7.44993911
full horizon MAE                   : 0.06214279
full horizon RMSE                  : 0.08728484
full horizon R2                    : -1.43066109
full horizon Pearson r             : 0.39490579
full horizon QLIKE                 : 7.44993911

--- Task 3 ---
1 day(s) MAE                       : 0.32330032
1 day(s) RMSE                      : 0.43268948
1 day(s) R2                        : -45.56153394
1 day(s) Pearson r                 : -0.02739129
1 day(s) QLIKE                     : 0.60854556
3 day(s) MAE                       : 0.32162142
3 day(s) RMSE                      : 0.43112690
3 day(s) R2                        : -44.92060048
3 day(s) Pearson r                 : -0.02633203
3 day(s) QLIKE                     : 0.58763693
5 day(s) MAE                       : 0.31748260
5 day(s) RMSE                      : 0.42950320
5 day(s) R2                        : -44.30784753
5 day(s) Pearson r                 : -0.02124632
5 day(s) QLIKE                     : 0.61592280
10 day(s) MAE                      : 0.32094808
10 day(s) RMSE                     : 0.43153178
10 day(s) R2                       : -44.06502947
10 day(s) Pearson r                : -0.02717462
10 day(s) QLIKE                    : 0.58260606
full horizon MAE                   : 0.32094808
full horizon RMSE                  : 0.43153178
full horizon R2                    : -44.06502947
full horizon Pearson r             : -0.02717462
full horizon QLIKE                 : 0.58260606

--- Task 4 ---
1 day(s) MAE                       : 0.03441974
1 day(s) RMSE                      : 0.04187620
1 day(s) R2                        : -2.34726588
1 day(s) Pearson r                 : -0.14887234
1 day(s) QLIKE                     : 1.21700612
3 day(s) MAE                       : 0.03411475
3 day(s) RMSE                      : 0.04137530
3 day(s) R2                        : -2.25422644
3 day(s) Pearson r                 : -0.14350647
3 day(s) QLIKE                     : 1.21457837
5 day(s) MAE                       : 0.03403407
5 day(s) RMSE                      : 0.04122295
5 day(s) R2                        : -2.21714587
5 day(s) Pearson r                 : -0.13890462
5 day(s) QLIKE                     : 1.21527753
10 day(s) MAE                      : 0.03352133
10 day(s) RMSE                     : 0.04024982
10 day(s) R2                       : -2.04296997
10 day(s) Pearson r                : -0.13838299
10 day(s) QLIKE                    : 1.21476666
full horizon MAE                   : 0.03352133
full horizon RMSE                  : 0.04024982
full horizon R2                    : -2.04296997
full horizon Pearson r             : -0.13838299
full horizon QLIKE                 : 1.21476666

--- Task 5 ---
1 day(s) MAE                       : 0.02846146
1 day(s) RMSE                      : 0.03645775
1 day(s) R2                        : -0.37100003
1 day(s) Pearson r                 : 0.03687558
1 day(s) QLIKE                     : 0.44595998
3 day(s) MAE                       : 0.02822321
3 day(s) RMSE                      : 0.03618645
3 day(s) R2                        : -0.35783915
3 day(s) Pearson r                 : 0.04784905
3 day(s) QLIKE                     : 0.44280734
5 day(s) MAE                       : 0.02809971
5 day(s) RMSE                      : 0.03601158
5 day(s) R2                        : -0.35228763
5 day(s) Pearson r                 : 0.05879996
5 day(s) QLIKE                     : 0.43555041
10 day(s) MAE                      : 0.02749074
10 day(s) RMSE                     : 0.03507871
10 day(s) R2                       : -0.30245764
10 day(s) Pearson r                : 0.10021518
10 day(s) QLIKE                    : 0.41013737
full horizon MAE                   : 0.02749074
full horizon RMSE                  : 0.03507871
full horizon R2                    : -0.30245764
full horizon Pearson r             : 0.10021518
full horizon QLIKE                 : 0.41013737

--- Task 6 ---
1 day(s) MAE                       : 2.72223086
1 day(s) RMSE                      : 3.34542469
1 day(s) R2                        : -1.62343703
1 day(s) Pearson r                 : 0.04451103
1 day(s) QLIKE                     : 0.08650612
3 day(s) MAE                       : 2.71703984
3 day(s) RMSE                      : 3.34414439
3 day(s) R2                        : -1.61456157
3 day(s) Pearson r                 : 0.04646716
3 day(s) QLIKE                     : 0.08622608
5 day(s) MAE                       : 2.71302989
5 day(s) RMSE                      : 3.34082510
5 day(s) R2                        : -1.60164713
5 day(s) Pearson r                 : 0.04906290
5 day(s) QLIKE                     : 0.08597071
10 day(s) MAE                      : 2.69763205
10 day(s) RMSE                     : 3.32550123
10 day(s) R2                       : -1.55938825
10 day(s) Pearson r                : 0.05595325
10 day(s) QLIKE                    : 0.08585372
full horizon MAE                   : 2.69763205
full horizon RMSE                  : 3.32550123
full horizon R2                    : -1.55938825
full horizon Pearson r             : 0.05595325
full horizon QLIKE                 : 0.08585372

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_H10.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.143027, max=43.9985

=== SP500 | H=10 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3466
Time steps for y: 10
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.0210836022185426
  Min value:  -28.086926568760678
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.5448233279041013
  Min value:  -28.086926568760678
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 10, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.378408171115352
  Min value:  -28.086926568760678
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 432s 471ms/step - loss: 0.7710 - val_loss: 1.2044 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 24s 305ms/step - loss: 0.5429 - val_loss: 1.5592 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 319ms/step - loss: 0.4981 - val_loss: 1.6962 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 319ms/step - loss: 0.4771 - val_loss: 1.6385 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 325ms/step - loss: 0.4611 - val_loss: 0.9110 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 319ms/step - loss: 0.4464 - val_loss: 1.1856 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=7 tf_ratio=0.684 -> TF=ON
Epoch 7/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 317ms/step - loss: 0.4310 - val_loss: 1.4433 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=8 tf_ratio=0.632 -> TF=ON
Epoch 8/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 322ms/step - loss: 0.4215 - val_loss: 1.4807 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=9 tf_ratio=0.579 -> TF=ON
Epoch 9/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 326ms/step - loss: 0.4120 - val_loss: 1.6645 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=10 tf_ratio=0.526 -> TF=ON
Epoch 10/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 0s 317ms/step - loss: 0.4511
Epoch 10: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
78/78 ━━━━━━━━━━━━━━━━━━━━ 25s 326ms/step - loss: 0.4045 - val_loss: 1.6772 - learning_rate: 5.0000e-04
Epoch 10: early stopping
Restoring model weights from the end of the best epoch: 5.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 10
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.60700916
1 day(s) RMSE                      : 2.79798834
1 day(s) R2                        : 0.02365039
1 day(s) Pearson r                 : 0.37939576
1 day(s) QLIKE                     : 0.71580521
3 day(s) MAE                       : 0.62331469
3 day(s) RMSE                      : 2.77255844
3 day(s) R2                        : 0.04136346
3 day(s) Pearson r                 : 0.31061098
3 day(s) QLIKE                     : 0.69649454
5 day(s) MAE                       : 0.63509430
5 day(s) RMSE                      : 2.79528126
5 day(s) R2                        : 0.02564165
5 day(s) Pearson r                 : 0.24254665
5 day(s) QLIKE                     : 0.79024379
10 day(s) MAE                      : 0.65852162
10 day(s) RMSE                     : 2.81815803
10 day(s) R2                       : 0.00976207
10 day(s) Pearson r                : 0.17296666
10 day(s) QLIKE                    : 0.90315859
full horizon MAE                   : 0.65852162
full horizon RMSE                  : 2.81815803
full horizon R2                    : 0.00976207
full horizon Pearson r             : 0.17296666
full horizon QLIKE                 : 0.90315859

--- Task 2 ---
1 day(s) MAE                       : 0.05699826
1 day(s) RMSE                      : 0.06628169
1 day(s) R2                        : -0.48805936
1 day(s) Pearson r                 : 0.51870784
1 day(s) QLIKE                     : 2.53048542
3 day(s) MAE                       : 0.11230046
3 day(s) RMSE                      : 0.17005702
3 day(s) R2                        : -8.66370278
3 day(s) Pearson r                 : 0.45199457
3 day(s) QLIKE                     : 2.66981245
5 day(s) MAE                       : 0.24504568
5 day(s) RMSE                      : 0.40559282
5 day(s) R2                        : -53.21177575
5 day(s) Pearson r                 : 0.34498303
5 day(s) QLIKE                     : 19.30819638
10 day(s) MAE                      : 0.53629146
10 day(s) RMSE                     : 0.73390532
10 day(s) R2                       : -170.84103215
10 day(s) Pearson r                : 0.27743703
10 day(s) QLIKE                    : 22.80976568
full horizon MAE                   : 0.53629146
full horizon RMSE                  : 0.73390532
full horizon R2                    : -170.84103215
full horizon Pearson r             : 0.27743703
full horizon QLIKE                 : 22.80976568

--- Task 3 ---
1 day(s) MAE                       : 0.42271674
1 day(s) RMSE                      : 0.42801818
1 day(s) R2                        : -44.56160832
1 day(s) Pearson r                 : -0.24347634
1 day(s) QLIKE                     : 0.00324553
3 day(s) MAE                       : 0.58627944
3 day(s) RMSE                      : 0.61153002
3 day(s) R2                        : -91.39168217
3 day(s) Pearson r                 : -0.09950380
3 day(s) QLIKE                     : 0.34016505
5 day(s) MAE                       : 0.70331807
5 day(s) RMSE                      : 0.73294372
5 day(s) R2                        : -130.94163914
5 day(s) Pearson r                 : -0.07290945
5 day(s) QLIKE                     : 4.41346445
10 day(s) MAE                      : 0.80162733
10 day(s) RMSE                     : 0.82200562
10 day(s) R2                       : -162.51728084
10 day(s) Pearson r                : -0.03690136
10 day(s) QLIKE                    : 2.88104607
full horizon MAE                   : 0.80162733
full horizon RMSE                  : 0.82200562
full horizon R2                    : -162.51728084
full horizon Pearson r             : -0.03690136
full horizon QLIKE                 : 2.88104607

--- Task 4 ---
1 day(s) MAE                       : 0.02475781
1 day(s) RMSE                      : 0.02806276
1 day(s) R2                        : -0.50319937
1 day(s) Pearson r                 : -0.42297115
1 day(s) QLIKE                     : 1.11836805
3 day(s) MAE                       : 0.02704340
3 day(s) RMSE                      : 0.03102487
3 day(s) R2                        : -0.82972284
3 day(s) Pearson r                 : -0.33722005
3 day(s) QLIKE                     : 1.14472680
5 day(s) MAE                       : 0.02622032
5 day(s) RMSE                      : 0.03007121
5 day(s) R2                        : -0.71196384
5 day(s) Pearson r                 : -0.31834248
5 day(s) QLIKE                     : 1.15182674
10 day(s) MAE                      : 0.02607916
10 day(s) RMSE                     : 0.03000280
10 day(s) R2                       : -0.69080389
10 day(s) Pearson r                : -0.32839189
10 day(s) QLIKE                    : 1.18569723
full horizon MAE                   : 0.02607916
full horizon RMSE                  : 0.03000280
full horizon R2                    : -0.69080389
full horizon Pearson r             : -0.32839189
full horizon QLIKE                 : 1.18569723

--- Task 5 ---
1 day(s) MAE                       : 0.02484768
1 day(s) RMSE                      : 0.03131470
1 day(s) R2                        : -0.01147316
1 day(s) Pearson r                 : -0.01023164
1 day(s) QLIKE                     : 0.42988841
3 day(s) MAE                       : 0.02534233
3 day(s) RMSE                      : 0.03127813
3 day(s) R2                        : -0.01446685
3 day(s) Pearson r                 : -0.00493144
3 day(s) QLIKE                     : 0.42810785
5 day(s) MAE                       : 0.02549079
5 day(s) RMSE                      : 0.03124765
5 day(s) R2                        : -0.01816780
5 day(s) Pearson r                 : -0.00588288
5 day(s) QLIKE                     : 0.42212264
10 day(s) MAE                      : 0.02535411
10 day(s) RMSE                     : 0.03101775
10 day(s) R2                       : -0.01834996
10 day(s) Pearson r                : -0.01215673
10 day(s) QLIKE                    : 0.40040041
full horizon MAE                   : 0.02535411
full horizon RMSE                  : 0.03101775
full horizon R2                    : -0.01834996
full horizon Pearson r             : -0.01215673
full horizon QLIKE                 : 0.40040041

--- Task 6 ---
1 day(s) MAE                       : 2.26237262
1 day(s) RMSE                      : 2.79642159
1 day(s) R2                        : -0.83304624
1 day(s) Pearson r                 : -0.37024257
1 day(s) QLIKE                     : 0.06212047
3 day(s) MAE                       : 2.29409446
3 day(s) RMSE                      : 2.83541904
3 day(s) R2                        : -0.87959091
3 day(s) Pearson r                 : -0.35067730
3 day(s) QLIKE                     : 0.06371745
5 day(s) MAE                       : 2.28876307
5 day(s) RMSE                      : 2.82880988
5 day(s) R2                        : -0.86529889
5 day(s) Pearson r                 : -0.31015622
5 day(s) QLIKE                     : 0.06501273
10 day(s) MAE                      : 2.28574774
10 day(s) RMSE                     : 2.82103970
10 day(s) R2                       : -0.84179099
10 day(s) Pearson r                : -0.27869712
10 day(s) QLIKE                    : 0.06794023
full horizon MAE                   : 2.28574774
full horizon RMSE                  : 2.82103970
full horizon R2                    : -0.84179099
full horizon Pearson r             : -0.27869712
full horizon QLIKE                 : 0.06794023

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_LSTM_H10.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.102263, max=2.4581

=== SP500 | H=20 | Custom_KAN (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using device: mps
Batch size for y: 3466
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.040427392407918
  Min value:  -28.09802133319718
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.546217024276721
  Min value:  -28.09802133319718
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3777376348304156
  Min value:  -28.09802133319718
Epoch 001 | phase=1 | train_loss=4.9737 | val_main=1.239120
Epoch 002 | phase=1 | train_loss=4.8934 | val_main=1.242069
Epoch 003 | phase=1 | train_loss=4.7079 | val_main=1.242712
Epoch 004 | phase=1 | train_loss=4.7168 | val_main=1.242313
Epoch 005 | phase=1 | train_loss=4.6247 | val_main=1.241971
Epoch 006 | phase=1 | train_loss=4.6334 | val_main=1.241779
Epoch 007 | phase=1 | train_loss=4.6410 | val_main=1.242062
Epoch 008 | phase=1 | train_loss=4.5637 | val_main=1.241922
Epoch 009 | phase=1 | train_loss=4.5324 | val_main=1.241764
Epoch 010 | phase=1 | train_loss=4.5368 | val_main=1.241874
Epoch 011 | phase=1 | train_loss=4.4415 | val_main=1.241556
Epoch 012 | phase=1 | train_loss=4.3472 | val_main=1.241532
Epoch 013 | phase=1 | train_loss=4.3360 | val_main=1.241778
Epoch 014 | phase=1 | train_loss=4.2677 | val_main=1.241775
Epoch 015 | phase=1 | train_loss=4.1499 | val_main=1.241645
Epoch 016 | phase=0 | train_loss=5.0284 | val_main=1.179077
Epoch 017 | phase=0 | train_loss=4.8452 | val_main=0.874938
Epoch 018 | phase=0 | train_loss=4.7214 | val_main=0.877519
Epoch 019 | phase=0 | train_loss=4.6207 | val_main=0.838269
Epoch 020 | phase=0 | train_loss=4.6058 | val_main=0.867170
Epoch 021 | phase=0 | train_loss=4.5353 | val_main=0.857799
Epoch 022 | phase=0 | train_loss=4.3673 | val_main=0.839675
Epoch 023 | phase=0 | train_loss=4.3281 | val_main=0.833295
Epoch 024 | phase=0 | train_loss=4.3958 | val_main=0.851652
Epoch 025 | phase=0 | train_loss=4.2973 | val_main=0.879776
Epoch 026 | phase=0 | train_loss=4.2225 | val_main=0.897138
Epoch 027 | phase=0 | train_loss=4.1611 | val_main=0.859533
Epoch 028 | phase=0 | train_loss=4.0841 | val_main=0.876749
Epoch 029 | phase=0 | train_loss=4.2711 | val_main=0.856718
Epoch 030 | phase=0 | train_loss=4.1243 | val_main=0.911371
Epoch 031 | phase=2 | train_loss=0.7296 | val_main=0.925956
Epoch 032 | phase=2 | train_loss=0.7278 | val_main=0.904187
Epoch 033 | phase=2 | train_loss=0.7221 | val_main=0.959852
Early stopping triggered.

Parameters used in the single-fit model:
input_dim: 60
output_dim: 120
no_tasks: 6
knots: 8
spline_power: 3
hidden_dim: 128
hidden_layers: 3
dropout: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 50
min_epochs: 30
batch_size: 128
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000
patience: 10
min_delta: 0.00010000
warmup_aux_epochs: 15
joint_epochs: 15
device: mps
verbose: True
checkpoint_path: None

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.59265912
1 day(s) RMSE                      : 2.25889898
1 day(s) R2                        : 0.36363370
1 day(s) Pearson r                 : 0.62893084
1 day(s) QLIKE                     : 0.59107949
3 day(s) MAE                       : 0.63788351
3 day(s) RMSE                      : 2.60422716
3 day(s) R2                        : 0.15423387
3 day(s) Pearson r                 : 0.48532110
3 day(s) QLIKE                     : 0.64034046
5 day(s) MAE                       : 0.69019368
5 day(s) RMSE                      : 2.89454720
5 day(s) R2                        : -0.04478988
5 day(s) Pearson r                 : 0.35316023
5 day(s) QLIKE                     : 0.69370123
10 day(s) MAE                      : 0.72677359
10 day(s) RMSE                     : 3.01979056
10 day(s) R2                       : -0.13700538
10 day(s) Pearson r                : 0.23184946
10 day(s) QLIKE                    : 0.80403601
20 day(s) MAE                      : 0.72558347
20 day(s) RMSE                     : 2.97970693
20 day(s) R2                       : -0.10688997
20 day(s) Pearson r                : 0.16625964
20 day(s) QLIKE                    : 0.84332589
full horizon MAE                   : 0.72558347
full horizon RMSE                  : 2.97970693
full horizon R2                    : -0.10688997
full horizon Pearson r             : 0.16625964
full horizon QLIKE                 : 0.84332589

--- Task 2 ---
1 day(s) MAE                       : 0.06827997
1 day(s) RMSE                      : 0.07896653
1 day(s) R2                        : -1.11212308
1 day(s) Pearson r                 : 0.33606127
1 day(s) QLIKE                     : 7.77364661
3 day(s) MAE                       : 0.06958906
3 day(s) RMSE                      : 0.07962734
3 day(s) R2                        : -1.11874594
3 day(s) Pearson r                 : 0.32472623
3 day(s) QLIKE                     : 7.74911230
5 day(s) MAE                       : 0.07116127
5 day(s) RMSE                      : 0.08141705
5 day(s) R2                        : -1.18445852
5 day(s) Pearson r                 : 0.31553888
5 day(s) QLIKE                     : 7.83587729
10 day(s) MAE                      : 0.07046533
10 day(s) RMSE                     : 0.08051037
10 day(s) R2                       : -1.06799976
10 day(s) Pearson r                : 0.31389389
10 day(s) QLIKE                    : 7.76093613
20 day(s) MAE                      : 0.06675511
20 day(s) RMSE                     : 0.07713142
20 day(s) R2                       : -0.78764336
20 day(s) Pearson r                : 0.31051068
20 day(s) QLIKE                    : 7.46997763
full horizon MAE                   : 0.06675511
full horizon RMSE                  : 0.07713142
full horizon R2                    : -0.78764336
full horizon Pearson r             : 0.31051068
full horizon QLIKE                 : 7.46997763

--- Task 3 ---
1 day(s) MAE                       : 0.36931482
1 day(s) RMSE                      : 0.46141793
1 day(s) R2                        : -51.94970547
1 day(s) Pearson r                 : 0.03819064
1 day(s) QLIKE                     : 0.50332604
3 day(s) MAE                       : 0.36469382
3 day(s) RMSE                      : 0.45754593
3 day(s) R2                        : -50.72097542
3 day(s) Pearson r                 : 0.04333010
3 day(s) QLIKE                     : 0.49241375
5 day(s) MAE                       : 0.36167734
5 day(s) RMSE                      : 0.45653595
5 day(s) R2                        : -50.19064140
5 day(s) Pearson r                 : 0.03885968
5 day(s) QLIKE                     : 0.49848850
10 day(s) MAE                      : 0.36516796
10 day(s) RMSE                     : 0.45767247
10 day(s) R2                       : -49.69016042
10 day(s) Pearson r                : 0.02217279
10 day(s) QLIKE                    : 0.43881711
20 day(s) MAE                      : 0.36868618
20 day(s) RMSE                     : 0.45710436
20 day(s) R2                       : -48.03911860
20 day(s) Pearson r                : 0.00585912
20 day(s) QLIKE                    : 0.38045110
full horizon MAE                   : 0.36868618
full horizon RMSE                  : 0.45710436
full horizon R2                    : -48.03911860
full horizon Pearson r             : 0.00585912
full horizon QLIKE                 : 0.38045110

--- Task 4 ---
1 day(s) MAE                       : 0.03359643
1 day(s) RMSE                      : 0.04057323
1 day(s) R2                        : -2.14220828
1 day(s) Pearson r                 : -0.14427692
1 day(s) QLIKE                     : 1.19686086
3 day(s) MAE                       : 0.03337784
3 day(s) RMSE                      : 0.04029279
3 day(s) R2                        : -2.08617199
3 day(s) Pearson r                 : -0.14260967
3 day(s) QLIKE                     : 1.19917403
5 day(s) MAE                       : 0.03334293
5 day(s) RMSE                      : 0.04017122
5 day(s) R2                        : -2.05508077
5 day(s) Pearson r                 : -0.14092023
5 day(s) QLIKE                     : 1.20096284
10 day(s) MAE                      : 0.03278610
10 day(s) RMSE                     : 0.03926086
10 day(s) R2                       : -1.89527158
10 day(s) Pearson r                : -0.13920179
10 day(s) QLIKE                    : 1.20054683
20 day(s) MAE                      : 0.03215742
20 day(s) RMSE                     : 0.03821614
20 day(s) R2                       : -1.71848624
20 day(s) Pearson r                : -0.15477417
20 day(s) QLIKE                    : 1.20293783
full horizon MAE                   : 0.03215742
full horizon RMSE                  : 0.03821614
full horizon R2                    : -1.71848624
full horizon Pearson r             : -0.15477417
full horizon QLIKE                 : 1.20293783

--- Task 5 ---
1 day(s) MAE                       : 0.02826577
1 day(s) RMSE                      : 0.03593542
1 day(s) R2                        : -0.33199726
1 day(s) Pearson r                 : 0.02985382
1 day(s) QLIKE                     : 0.43866440
3 day(s) MAE                       : 0.02788894
3 day(s) RMSE                      : 0.03555566
3 day(s) R2                        : -0.31091323
3 day(s) Pearson r                 : 0.04036136
3 day(s) QLIKE                     : 0.43407258
5 day(s) MAE                       : 0.02770049
5 day(s) RMSE                      : 0.03526943
5 day(s) R2                        : -0.29712471
5 day(s) Pearson r                 : 0.05997424
5 day(s) QLIKE                     : 0.42581193
10 day(s) MAE                      : 0.02712309
10 day(s) RMSE                     : 0.03451402
10 day(s) R2                       : -0.26086194
10 day(s) Pearson r                : 0.09309808
10 day(s) QLIKE                    : 0.40185822
20 day(s) MAE                      : 0.02622822
20 day(s) RMSE                     : 0.03327206
20 day(s) R2                       : -0.21242980
20 day(s) Pearson r                : 0.13757761
20 day(s) QLIKE                    : 0.32614722
full horizon MAE                   : 0.02622822
full horizon RMSE                  : 0.03327206
full horizon R2                    : -0.21242980
full horizon Pearson r             : 0.13757761
full horizon QLIKE                 : 0.32614722

--- Task 6 ---
1 day(s) MAE                       : 2.74568902
1 day(s) RMSE                      : 3.38405508
1 day(s) R2                        : -1.68437370
1 day(s) Pearson r                 : 0.01165561
1 day(s) QLIKE                     : 0.08719635
3 day(s) MAE                       : 2.73249850
3 day(s) RMSE                      : 3.37550674
3 day(s) R2                        : -1.66383175
3 day(s) Pearson r                 : 0.01426538
3 day(s) QLIKE                     : 0.08657021
5 day(s) MAE                       : 2.72807683
5 day(s) RMSE                      : 3.37177647
5 day(s) R2                        : -1.65007682
5 day(s) Pearson r                 : 0.01655395
5 day(s) QLIKE                     : 0.08642802
10 day(s) MAE                      : 2.71910150
10 day(s) RMSE                     : 3.36625976
10 day(s) R2                       : -1.62251028
10 day(s) Pearson r                : 0.02065933
10 day(s) QLIKE                    : 0.08650766
20 day(s) MAE                      : 2.69855944
20 day(s) RMSE                     : 3.35249890
20 day(s) R2                       : -1.56608468
20 day(s) Pearson r                : 0.02356740
20 day(s) QLIKE                    : 0.08696706
full horizon MAE                   : 2.69855944
full horizon RMSE                  : 3.35249890
full horizon R2                    : -1.56608468
full horizon Pearson r             : 0.02356740
full horizon QLIKE                 : 0.08696706

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_H20.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.137403, max=34.3349

=== SP500 | H=20 | Custom_KAN_LSTM (simple fit) | no_tasks=6 ===
[mode=log_mse] loss_type=mse, target_is_logvar=True, normalize_y=True
Using TensorFlow device: CPU
Batch size for y: 3466
Time steps for y: 20
Features for y: 6

Starting training without CV:
Checking X_price_train_core:
Shape: (2495, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  20.304044071790344
  Min value:  -0.29186160079947243
Checking X_time_train_core:
Shape: (2495, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -2.0363820510485593
Checking y_train_core (log_mse scaled):
Shape: (2495, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  4.040427392407918
  Min value:  -28.09802133319718
Checking X_price_val:
Shape: (277, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.189039765537116
  Min value:  -0.288808876061582
Checking X_time_val:
Shape: (277, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_val (log_mse scaled):
Shape: (277, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  2.546217024276721
  Min value:  -28.09802133319718
Checking X_price_test:
Shape: (694, 60, 1)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  14.65876861006644
  Min value:  -0.2909517549064528
Checking X_time_test:
Shape: (694, 60, 14)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  9.067132736068572
  Min value:  -1.7123832950667475
Checking y_test (log_mse scaled):
Shape: (694, 20, 6)
  Contains NaN values:      False
  Contains +inf values:     False
  Contains -inf values:     False
  Contains any infinities:  False
  Max value:  3.3777376348304156
  Min value:  -28.09802133319718
[TF Scheduler] epoch=1 tf_ratio=1.000 -> TF=ON
Epoch 1/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 209s 515ms/step - loss: 0.7671 - val_loss: 1.2792 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=2 tf_ratio=0.947 -> TF=ON
Epoch 2/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 35s 442ms/step - loss: 0.5125 - val_loss: 1.5834 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=3 tf_ratio=0.895 -> TF=ON
Epoch 3/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 36s 466ms/step - loss: 0.4673 - val_loss: 1.4550 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=4 tf_ratio=0.842 -> TF=ON
Epoch 4/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 36s 460ms/step - loss: 0.4497 - val_loss: 1.5625 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=5 tf_ratio=0.789 -> TF=ON
Epoch 5/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 36s 462ms/step - loss: 0.4318 - val_loss: 1.6495 - learning_rate: 5.0000e-04
[TF Scheduler] epoch=6 tf_ratio=0.737 -> TF=ON
Epoch 6/20
78/78 ━━━━━━━━━━━━━━━━━━━━ 0s 461ms/step - loss: 0.4427
Epoch 6: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
78/78 ━━━━━━━━━━━━━━━━━━━━ 37s 474ms/step - loss: 0.4132 - val_loss: 1.5208 - learning_rate: 5.0000e-04
Epoch 6: early stopping
Restoring model weights from the end of the best epoch: 1.

Parameters used in the single-fit model:
input_dim: 1
no_tasks: 6
task_output_dim: 1
pred_len: 20
hidden_layers: 2
hidden_dim: 32
dropout: 0.00000000
knots: 8
spline_power: 5
sub_kan_configs: None
teacher_forcing: True
tf_ratio_start: 1.00000000
tf_ratio_end: 0.00000000
lr: 0.00050000
l2_weight: 0.00001000
epochs: 20
batch_size: 32
verbose: 1
patience: 5
min_delta: 0.00010000
validation_split: 0.00000000
seed: 42
metrics: ('mse',)
run_eagerly: False
min_epochs: 10
output_dim: 6
loss_type: mse
target_is_logvar: True
nll_eps: 0.00000000
clamp_logvar_min: -20.00000000
clamp_logvar_max: 20.00000000
student_df: 5.00000000

Results for regular train/test evaluation (without CV):

--- Task 1 ---
1 day(s) MAE                       : 0.64767237
1 day(s) RMSE                      : 2.86762199
1 day(s) R2                        : -0.02555123
1 day(s) Pearson r                 : 0.62927649
1 day(s) QLIKE                     : 1.00042933
3 day(s) MAE                       : 0.64801158
3 day(s) RMSE                      : 2.86842303
3 day(s) R2                        : -0.02607466
3 day(s) Pearson r                 : 0.10963418
3 day(s) QLIKE                     : 0.99935628
5 day(s) MAE                       : 0.64892921
5 day(s) RMSE                      : 2.87032285
5 day(s) R2                        : -0.02737544
5 day(s) Pearson r                 : 0.04115390
5 day(s) QLIKE                     : 1.00263756
10 day(s) MAE                      : 0.65086309
10 day(s) RMSE                     : 2.87203464
10 day(s) R2                       : -0.02846193
10 day(s) Pearson r                : 0.02489798
10 day(s) QLIKE                    : 1.00405926
20 day(s) MAE                      : 0.65170702
20 day(s) RMSE                     : 2.87246624
20 day(s) R2                       : -0.02864902
20 day(s) Pearson r                : 0.01759992
20 day(s) QLIKE                    : 1.00202103
full horizon MAE                   : 0.65170702
full horizon RMSE                  : 2.87246624
full horizon R2                    : -0.02864902
full horizon Pearson r             : 0.01759992
full horizon QLIKE                 : 1.00202103

--- Task 2 ---
1 day(s) MAE                       : 0.04241849
1 day(s) RMSE                      : 0.06653976
1 day(s) R2                        : -0.49966939
1 day(s) Pearson r                 : 0.54841295
1 day(s) QLIKE                     : 2.60336329
3 day(s) MAE                       : 0.05068332
3 day(s) RMSE                      : 0.07404670
3 day(s) R2                        : -0.83217050
3 day(s) Pearson r                 : 0.00795080
3 day(s) QLIKE                     : 6.19467582
5 day(s) MAE                       : 0.05261835
5 day(s) RMSE                      : 0.07590526
5 day(s) R2                        : -0.89870221
5 day(s) Pearson r                 : 0.00294387
5 day(s) QLIKE                     : 7.06057813
10 day(s) MAE                      : 0.05454973
10 day(s) RMSE                     : 0.07805465
10 day(s) R2                       : -0.94376780
10 day(s) Pearson r                : -0.00228189
10 day(s) QLIKE                    : 6.64302809
20 day(s) MAE                      : 0.05654097
20 day(s) RMSE                     : 0.08073530
20 day(s) R2                       : -0.95859713
20 day(s) Pearson r                : -0.00709750
20 day(s) QLIKE                    : 5.04555404
full horizon MAE                   : 0.05654097
full horizon RMSE                  : 0.08073530
full horizon R2                    : -0.95859713
full horizon Pearson r             : -0.00709750
full horizon QLIKE                 : 5.04555404

--- Task 3 ---
1 day(s) MAE                       : 0.41880436
1 day(s) RMSE                      : 0.42342587
1 day(s) R2                        : -43.58917075
1 day(s) Pearson r                 : 0.30218690
1 day(s) QLIKE                     : 0.00241607
3 day(s) MAE                       : 0.49828233
3 day(s) RMSE                      : 0.50801501
3 day(s) R2                        : -62.76030656
3 day(s) Pearson r                 : 0.03526802
3 day(s) QLIKE                     : 0.02609402
5 day(s) MAE                       : 0.61592835
5 day(s) RMSE                      : 0.63934119
5 day(s) R2                        : -99.39358914
5 day(s) Pearson r                 : 0.01747071
5 day(s) QLIKE                     : 0.36339575
10 day(s) MAE                      : 0.75091246
10 day(s) RMSE                     : 0.77387645
10 day(s) R2                       : -143.92968531
10 day(s) Pearson r                : 0.01527902
10 day(s) QLIKE                    : 0.79670806
20 day(s) MAE                      : 0.82018370
20 day(s) RMSE                     : 0.83498519
20 day(s) R2                       : -162.63246067
20 day(s) Pearson r                : 0.01846285
20 day(s) QLIKE                    : 0.52762033
full horizon MAE                   : 0.82018370
full horizon RMSE                  : 0.83498519
full horizon R2                    : -162.63246067
full horizon Pearson r             : 0.01846285
full horizon QLIKE                 : 0.52762033

--- Task 4 ---
1 day(s) MAE                       : 0.02104220
1 day(s) RMSE                      : 0.02389576
1 day(s) R2                        : -0.08992699
1 day(s) Pearson r                 : 0.23852544
1 day(s) QLIKE                     : 1.08955013
3 day(s) MAE                       : 0.02114438
3 day(s) RMSE                      : 0.02406950
3 day(s) R2                        : -0.10128434
3 day(s) Pearson r                 : 0.01494281
3 day(s) QLIKE                     : 1.09256870
5 day(s) MAE                       : 0.02019343
5 day(s) RMSE                      : 0.02363673
5 day(s) R2                        : -0.05771181
5 day(s) Pearson r                 : 0.02990537
5 day(s) QLIKE                     : 1.10165721
10 day(s) MAE                      : 0.02007690
10 day(s) RMSE                     : 0.02353760
10 day(s) R2                       : -0.04062451
10 day(s) Pearson r                : 0.00665258
10 day(s) QLIKE                    : 1.10060525
20 day(s) MAE                      : 0.02062980
20 day(s) RMSE                     : 0.02456458
20 day(s) R2                       : -0.12318755
20 day(s) Pearson r                : -0.06888730
20 day(s) QLIKE                    : 1.13319694
full horizon MAE                   : 0.02062980
full horizon RMSE                  : 0.02456458
full horizon R2                    : -0.12318755
full horizon Pearson r             : -0.06888730
full horizon QLIKE                 : 1.13319694

--- Task 5 ---
1 day(s) MAE                       : 0.02570228
1 day(s) RMSE                      : 0.03114222
1 day(s) R2                        : -0.00036142
1 day(s) Pearson r                 : 0.05697330
1 day(s) QLIKE                     : 0.42942536
3 day(s) MAE                       : 0.02606636
3 day(s) RMSE                      : 0.03118846
3 day(s) R2                        : -0.00865848
3 day(s) Pearson r                 : -0.00713684
3 day(s) QLIKE                     : 0.42699054
5 day(s) MAE                       : 0.02606495
5 day(s) RMSE                      : 0.03115202
5 day(s) R2                        : -0.01194579
5 day(s) Pearson r                 : -0.02142971
5 day(s) QLIKE                     : 0.42093034
10 day(s) MAE                      : 0.02640165
10 day(s) RMSE                     : 0.03116828
10 day(s) R2                       : -0.02825788
10 day(s) Pearson r                : -0.01420625
10 day(s) QLIKE                    : 0.39965849
20 day(s) MAE                      : 0.02791691
20 day(s) RMSE                     : 0.03473274
20 day(s) R2                       : -0.32122042
20 day(s) Pearson r                : 0.01577365
20 day(s) QLIKE                    : 0.34276729
full horizon MAE                   : 0.02791691
full horizon RMSE                  : 0.03473274
full horizon R2                    : -0.32122042
full horizon Pearson r             : 0.01577365
full horizon QLIKE                 : 0.34276729

--- Task 6 ---
1 day(s) MAE                       : 2.13753614
1 day(s) RMSE                      : 2.64224300
1 day(s) R2                        : -0.63649107
1 day(s) Pearson r                 : 0.06873530
1 day(s) QLIKE                     : 0.05753544
3 day(s) MAE                       : 2.05253475
3 day(s) RMSE                      : 2.53687454
3 day(s) R2                        : -0.50461994
3 day(s) Pearson r                 : -0.00379009
3 day(s) QLIKE                     : 0.05828848
5 day(s) MAE                       : 1.95710473
5 day(s) RMSE                      : 2.43264542
5 day(s) R2                        : -0.37942640
5 day(s) Pearson r                 : 0.00202893
5 day(s) QLIKE                     : 0.05972980
10 day(s) MAE                      : 1.91800748
10 day(s) RMSE                     : 2.38361609
10 day(s) R2                       : -0.31490557
10 day(s) Pearson r                : -0.00560522
10 day(s) QLIKE                    : 0.05980990
20 day(s) MAE                      : 1.82004543
20 day(s) RMSE                     : 2.28522796
20 day(s) R2                       : -0.19231980
20 day(s) Pearson r                : 0.03530988
20 day(s) QLIKE                    : 0.06126600
full horizon MAE                   : 1.82004543
full horizon RMSE                  : 2.28522796
full horizon R2                    : -0.19231980
full horizon Pearson r             : 0.03530988
full horizon QLIKE                 : 0.06126600

Best model (no-CV) saved to: /Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v8/Saved_objects/hierarchical_results_saved_object/SP500/Custom_KAN_LSTM_H20.pkl

Saved y_true min=0.00323949, max=53.2315
Saved y_pred min=0.333764, max=0.392152
In [836]:
# Build the per-task metric frames (full outer horizon only) and pretty-print them,
# then persist the aggregated metrics in both CSV and plain-text form under results/.
hierarchical_frames = build_metric_frames(hierarchical_results_store, outer_horizon="full", pretty_print=True)
os.makedirs("results", exist_ok=True)
# Same export routine, two output formats — driven by file extension of the target path.
for export_path in ("results/hierarchical_task_all.csv", "results/hierarchical_task_all.txt"):
    export_metrics(hierarchical_results_store, export_path, outer_horizon="full")
=== AAPL | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.318615  2.487262  2.555042  2.545927
Pearson r  0.370403  0.284447  0.208310  0.186543
QLIKE      0.368335  0.432096  0.459599  0.471674
R2         0.026075 -0.002711 -0.023170 -0.015726
RMSE       8.529161  8.951316  9.079594  9.065810

=== AAPL | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.439325  2.733695  2.576263  2.625173
Pearson r  0.451438  0.006517 -0.003502 -0.002359
QLIKE      0.535380  0.557882  0.587486  0.602212
R2        -0.018415 -0.007937 -0.040699 -0.050278
RMSE       8.721797  8.974611  9.157042  9.218717

=== MSFT | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.065696  2.146139  2.139506  2.173128
Pearson r  0.049339  0.002476 -0.019362 -0.014000
QLIKE      0.480419  0.528343  0.519135  0.509428
R2        -0.013146 -0.029190 -0.033569 -0.030169
RMSE       6.746685  6.800049  6.814750  7.008772

=== MSFT | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        1.969583  2.149781  2.345673  2.158551
Pearson r  0.262519  0.164579  0.006053  0.006947
QLIKE      0.379534  0.433327  0.474432  0.493606
R2         0.030454  0.004908 -0.003006 -0.022197
RMSE       6.599920  6.686456  6.713235  6.981600

=== GE | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.123006   4.326514   4.459059   4.492792
Pearson r   0.012156   0.001579   0.005824  -0.021266
QLIKE       0.691330   0.743176   0.762849   0.783562
R2         -0.010290  -0.015666  -0.015221  -0.023053
RMSE       27.566934  27.638597  27.634563  27.741693

=== GE | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                  H1         H5        H10        H20
MAE         4.870943   4.523866   4.465643   4.318339
Pearson r   0.046869  -0.000122  -0.000272  -0.002192
QLIKE       0.684368   0.695262   0.683893   0.687734
R2         -0.001825  -0.005724  -0.006250  -0.011102
RMSE       27.451193  27.502984  27.512204  27.579184

=== BAC | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.218764  2.315760  2.377755  2.360622
Pearson r  0.360256  0.162822 -0.044596 -0.054259
QLIKE      0.339648  0.423011  0.433186  0.430008
R2         0.090281  0.009696 -0.010677 -0.011761
RMSE       7.403981  7.728616  7.808080  7.810978

=== BAC | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        1.984658  2.174726  2.300111  2.308275
Pearson r  0.437762  0.164894 -0.001345  0.000910
QLIKE      0.272968  0.391940  0.439506  0.422809
R2         0.160222 -0.008393 -0.022134 -0.012223
RMSE       7.113674  7.798882  7.852211  7.812762

=== C | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.978088  2.903246  2.887941  2.952660
Pearson r  0.483365  0.142331  0.083177  0.034534
QLIKE      0.274388  0.416982  0.429684  0.436819
R2         0.096491 -0.009518 -0.027654 -0.028733
RMSE       8.972150  9.522281  9.618667  9.627530

=== C | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        2.713545  2.853104  2.939848  2.934725
Pearson r  0.479963  0.179852  0.010335  0.004627
QLIKE      0.311682  0.405071  0.457465  0.432778
R2         0.179079 -0.039958 -0.053677 -0.024680
RMSE       8.552261  9.664776  9.739689  9.608544

=== BTCUSDT | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        3.996892  4.508294  4.674688  4.774879
Pearson r  0.378138  0.162538  0.044634 -0.003729
QLIKE      0.255808  0.367620  0.404387  0.419134
R2         0.065903 -0.027110 -0.075247 -0.080072
RMSE       8.878098  9.308816  9.525601  9.545265

=== BTCUSDT | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5        H10       H20
MAE        4.456452  4.788267   5.306979  5.059873
Pearson r  0.311768  0.176070   0.111611  0.004598
QLIKE      0.323612  0.370649   0.419227  0.424784
R2         0.011846 -0.125630  -0.230204 -0.118786
RMSE       9.131376  9.745045  10.188888  9.714826

=== EURUSD | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.088158  0.092045  0.096391  0.099675
Pearson r  0.599808  0.509232  0.477334  0.424556
QLIKE      0.428952  0.406325  0.426089  0.474492
R2         0.343572  0.243549  0.198655  0.143635
RMSE       0.173094  0.184059  0.188544  0.193770

=== EURUSD | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.107669  0.119398  0.116493  0.140131
Pearson r  0.413050  0.369016  0.308140  0.005337
QLIKE      0.489902  0.522251  0.519447  0.596096
R2         0.104308 -0.031741 -0.048783 -0.326733
RMSE       0.202194  0.214957  0.215698  0.241184

=== GOLD | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.309377  0.311183  0.317904  0.346815
Pearson r  0.447552  0.478758  0.488464  0.401298
QLIKE      0.513822  0.488001  0.509680  0.560789
R2         0.173158  0.192401  0.208005  0.097801
RMSE       0.595884  0.589995  0.587553  0.631780

=== GOLD | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.408012  0.454040  0.450157  0.426175
Pearson r  0.357309  0.314968  0.065464  0.008281
QLIKE      0.499330  0.505073  0.536587  0.581421
R2        -0.082473 -0.240777 -0.283731 -0.078860
RMSE       0.681803  0.731303  0.748037  0.690872

=== SP500 | Custom_KAN | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.484774  0.650754  0.725642  0.725583
Pearson r  0.652686  0.344270  0.236617  0.166260
QLIKE      0.443646  0.669757  0.792334  0.843326
R2         0.370255  0.019755 -0.202000 -0.106890
RMSE       2.247116  2.803713  3.104901  2.979707

=== SP500 | Custom_KAN_LSTM | using all outer horizons [1, 5, 10, 20] | task=Task 1 | full-only ===
                 H1        H5       H10       H20
MAE        0.581925  0.642370  0.658522  0.651707
Pearson r  0.471433  0.235388  0.172967  0.017600
QLIKE      0.616883  0.798351  0.903159  1.002021
R2         0.081577  0.018913  0.009762 -0.028649
RMSE       2.713717  2.804916  2.818158  2.872466
Out[836]:
{'mode': 'text',
 'path': '/Users/silviumatu/Desktop/Code/Python/Licenta/Licenta_INFO_v9/results/hierarchical_task_all.txt',
 'sections': 18}
In [837]:
# Generate metric-vs-horizon charts for every ticker / model / metric
# combination (None = "use all available") and save them to disk.
saved_hierarchical_plots = plot_metric_vs_horizon(
    hierarchical_frames,
    tickers=None,          # all tickers
    models=None,           # all models
    metrics=None,          # all metrics
    include_full=True,
    save_dir="plots/hierarchical",
    show=True
)
# BUG FIX: the original printed len(saved), but `saved` is never defined in
# this cell — it only worked via stale kernel state from an earlier run.
# The list returned above is bound to `saved_hierarchical_plots`.
print("Saved", len(saved_hierarchical_plots), "charts to plots/hierarchical")


# Plot H=1 actual-vs-predicted series from the stored hierarchical results
# (all tickers/models; figures written under plots/hierarchical/y_plots).
saved_hierarchical_y_plots = plot_h1_full_from_results(
    hierarchical_results_store,
    tickers=None,
    models=None,
    save_dir="plots/hierarchical/y_plots",
    show=True,
    verbose=True
)
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
Saved 10 charts to plots/hierarchical
  [AAPL H=1] Custom_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [AAPL H=1] Custom_KAN_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
[AAPL H=1] actual=YES

[AAPL H=1] Aligned series head (first 50 of 404 rows):
  Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
0.934885          1.19949               2.41671
 1.65221           1.1972               2.41671
 3.00982           1.1994               2.41671
 7.17364          1.21536               2.41671
 2.32415          1.30248               2.41672
  1.8112           1.2985               2.41671
 2.83463          1.29008               2.41672
 2.29244          1.30762               2.41672
 1.60917          1.31178               2.41672
  1.9016          1.28338               2.41672
 2.02761          1.28975               2.41672
0.882809          1.29439               2.41672
 1.75996          1.28112               2.41671
 1.93843          1.27651               2.41671
 1.30271            1.289               2.41671
 1.66381           1.2949               2.41671
  1.7326          1.29491               2.41671
 1.90607          1.27515               2.41671
 2.70629          1.27923               2.41671
 1.98734          1.29793               2.41671
 4.17835          1.28181               2.41671
 2.23484          1.34233               2.41672
  2.5908          1.34213               2.41672
 1.38316          1.35036               2.41672
 3.07923          1.33739               2.41672
 2.16603          1.35053               2.41672
 1.08108          1.35272               2.41672
 1.05538          1.33887               2.41672
 1.56663          1.32561               2.41671
 1.20165           1.3273               2.41671
 3.78232          1.28792               2.41671
  1.6213          1.33214               2.41672
 2.03959           1.3245               2.41671
  2.6352          1.32788               2.41672
 1.42274          1.33597               2.41672
   2.873          1.33285               2.41671
 1.51033           1.3382               2.41672
 2.09779          1.31948               2.41671
 4.03363           1.3201               2.41672
 2.80041          1.38374               2.41672
 2.38801          1.36705               2.41672
  2.0338          1.35581               2.41672
 2.19524          1.37625               2.41672
 11.9895          1.36734               2.41672
 3.66056          1.58912               2.41674
  1.3156          1.57306               2.41672
0.963659          1.53643               2.41673
 1.08602          1.51526               2.41672
 1.29415          1.46083               2.41672
0.888571          1.43817               2.41672
No description has been provided for this image
  [MSFT H=1] Custom_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [MSFT H=1] Custom_KAN_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
[MSFT H=1] actual=YES

[MSFT H=1] Aligned series head (first 50 of 404 rows):
 Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
1.62646          2.22021               1.45818
1.38102          2.15964               1.44234
1.45815          2.12555               1.44359
1.91787          2.13263               1.43549
1.81463          2.15343                1.4279
1.90777          2.06604               1.41948
1.13204          2.13591                1.4157
2.16207          2.14262               1.38857
1.60526          2.11671               1.39645
1.36873          2.12241               1.39064
1.54924          2.05999               1.37638
1.38923          2.10711               1.37061
1.71635          2.11613               1.36279
7.14713          2.09352               1.36281
2.33716          2.11699               1.49179
2.91188           2.1708               1.51338
2.05814          2.11762               1.53022
2.23013          2.11176               1.52305
 2.9262          2.15948               1.51272
2.44337          2.12708               1.53692
3.72187          2.19136               1.53591
2.39093          2.14672               1.59241
3.38264           2.0828               1.58948
1.96965          2.13461               1.61796
5.97013          2.05984               1.58917
3.19073          2.13934               1.98441
1.62044          2.22036               2.17582
1.86668          2.09107                1.7502
1.46844          2.12469               1.62203
1.78506          2.04927                1.5676
2.02385           2.1187                1.5204
2.15305          2.06661               1.49572
3.35866          2.04965               1.48094
4.75959          1.93655               1.51698
2.26205           1.9536               1.62801
 2.5589           1.9319               1.61949
28.4576          1.90207               1.59337
5.29249          2.45396               5.59901
5.58633          2.58595               5.63937
2.95631          2.43606               5.32488
 2.9314           2.3637                4.7406
2.15893          2.38768               4.35883
2.50397          2.44829               3.69189
2.31466          2.40853               3.42803
 1.9771          2.40624               3.21562
1.16435          2.39158               2.89155
1.01889          2.15879               2.53758
1.35823           2.2645               1.85151
1.42899          2.19558                1.5984
1.02177          2.21435               1.52827
No description has been provided for this image
  [GE H=1] Custom_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [GE H=1] Custom_KAN_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
[GE H=1] actual=YES

[GE H=1] Aligned series head (first 50 of 404 rows):
  Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
  1.5135          2.62958               4.74942
 1.72313           2.6234               4.74942
 1.67254          2.60952               4.74942
 1.66078          2.62365               4.74942
0.769403          2.61986               4.74942
 1.04217          2.61249               4.74942
 3.65719          2.63235               4.74942
 1.16201          2.63027               4.74942
 4.30547           2.6139               4.74942
 1.37445          2.58896               4.74942
 1.38507          2.58454               4.74942
 1.13075           2.5748               4.74942
 1.22114          2.57874               4.74942
 1.99537          2.56524               4.74942
 2.85347           2.5721               4.74942
0.960334          2.58125               4.74942
 2.10725          2.57416               4.74942
  2.0909          2.58061               4.74942
 1.68286          2.58339               4.74942
 3.11206          2.58157               4.74942
 2.59462          2.60752               4.74942
 4.00565          2.57793               4.74942
 2.58083          2.59204               4.74942
 1.92091          2.58972               4.74942
 3.17544          2.57879               4.74942
 2.71117          2.59879               4.74942
 1.39353          2.60273               4.74942
 2.01672          2.59915               4.74942
 2.69781          2.60441               4.74942
 2.05652          2.60025               4.74942
 2.32011          2.60447               4.74942
 1.69183          2.61983               4.74942
 3.70289           2.6081               4.74942
 3.22004          2.57598               4.74942
  4.1739          2.58382               4.74942
  3.9362          2.58502               4.74943
 20.9567          2.59216               4.74943
 4.07076          2.67048               4.74944
 7.35977          2.67369               4.74945
 4.59998          2.68347               4.74945
 3.23707          2.71246               4.74945
 3.27846          2.68517               4.74945
 7.43036          2.70505               4.74944
 2.44681          2.70852               4.74944
 1.00844          2.70774               4.74944
 2.67284          2.68832               4.74944
 1.85009          2.70225               4.74943
 2.73625          2.68353               4.74943
 1.99492          2.69937               4.74943
 1.32531          2.69042               4.74943
No description has been provided for this image
  [BAC H=1] Custom_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [BAC H=1] Custom_KAN_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
[BAC H=1] actual=YES

[BAC H=1] Aligned series head (first 50 of 404 rows):
 Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
2.28991          1.79203               1.62009
2.58025          1.83137               1.65437
1.43685          1.81755               1.70708
1.40532          1.82192               1.66887
1.62484          1.73964               1.63696
2.44658          1.71432               1.63278
1.56274          1.76774               1.67399
2.04251           1.7558               1.65693
1.92693          1.81258               1.66462
 1.4117          1.77126               1.67169
1.18859           1.8158               1.64326
1.43173          1.77945               1.61318
1.53413          1.76102               1.60624
2.43259          1.85296               1.60542
1.83194          1.81342               1.64349
1.89469          1.77622               1.64915
2.37737          1.75054               1.65103
2.50765          1.68169               1.68564
3.17295          1.76383               1.72964
2.17531           1.7835               1.84894
3.21212          1.78605               1.83289
2.57597          1.86529               1.92585
2.49883          1.85048               1.92135
2.39942          1.87684               1.90979
5.25542          1.87626               1.90109
5.78329          2.08839               2.42634
2.47379           2.2391                3.1094
2.97723          2.18929               2.65656
2.52012          2.15886               2.62751
10.1661          2.10366               2.42103
3.21457          2.60243               4.42903
8.54499           2.5696                3.2979
4.49535          2.99583               4.14051
4.60281          3.03314               3.79923
3.86461          2.92587               3.49156
3.30898           2.8859               3.39191
 2.6722          3.02325               3.20113
2.11101          2.79741               2.96784
 4.7242          2.70799               2.75853
3.81735          2.77711               3.37629
2.94351          2.79217               3.12513
2.42652          2.65585               2.86305
2.96869          2.55386               2.64965
2.88004          2.61336               2.66233
7.20205          2.52962               2.54334
3.87685           2.8543               3.76743
2.27628          2.89852               3.20411
2.08444          2.86025               2.78926
3.13418          2.51228               2.55478
3.92102           2.4845               2.66515
No description has been provided for this image
  [C H=1] Custom_KAN: blob_shape=(404, 1, 2), yt=ok, yp=ok
  [C H=1] Custom_KAN_LSTM: blob_shape=(404, 1, 2), yt=ok, yp=ok
[C H=1] actual=YES

[C H=1] Aligned series head (first 50 of 404 rows):
 Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
 1.4361          1.61315               1.55255
 2.4089          1.72313               1.55103
1.70682          1.83608                1.5575
 1.3802          1.73085               1.55482
1.42651          1.49696               1.55232
2.28623          1.39389               1.55051
3.09043          2.50494               1.55637
3.47712           2.0521                1.5676
2.10921          2.35139                1.5857
3.05691          1.67253               1.57905
1.32975          2.75387               1.59699
1.00699          1.53955               1.57376
1.56839          1.61568               1.56194
1.79976          1.36946               1.55885
3.40343          1.55363               1.55891
2.66085          1.60086               1.57622
3.50115          1.44726               1.57933
2.93494          1.81702               1.60817
3.77812            1.952               1.62002
2.79098          2.71382               1.68594
2.48685          2.30216               1.68393
1.80238          2.04674               1.66663
4.05301          2.24658               1.62126
  2.247          2.37602               1.71181
3.71379            1.848               1.66661
4.11532           2.1277               1.73817
3.12293          2.75956               1.87834
2.66388          2.16317               1.87992
5.69437          2.17952               1.81883
9.77504          3.75105               2.18227
4.82663          5.36929               3.08052
5.00513          4.23115                3.0356
4.18801          4.56509               3.15222
2.87579          3.96287               3.11619
3.95956          3.24976               2.82096
2.62006          3.45699               2.87351
1.79606           2.8276               2.67658
2.26218          2.56078                2.5467
6.96561          2.44397               2.32597
3.21371          3.71038               2.91514
3.60813          2.63559               2.69264
2.14357           2.7847               2.66517
2.83844          2.43422               2.37193
2.47451          2.30596               2.26827
5.06039           2.2874               2.18536
2.61845          3.33734               2.29653
1.97985          2.94661               2.21067
   2.24          2.61437               2.04176
5.28614          2.45761               1.90302
1.59823          3.55161               2.18998
No description has been provided for this image
  [BTCUSDT H=1] Custom_KAN: blob_shape=(480, 1, 2), yt=ok, yp=ok
  [BTCUSDT H=1] Custom_KAN_LSTM: blob_shape=(480, 1, 2), yt=ok, yp=ok
[BTCUSDT H=1] actual=YES

[BTCUSDT H=1] Aligned series head (first 50 of 480 rows):
  Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
 6.92748          3.48175               6.12203
 1.26781          2.58393               6.40278
0.752504          1.99679               3.86965
 10.0004          2.81132               3.38879
 8.45084          6.45316               4.59524
 5.32244          5.79103               4.34959
 2.80991          4.04693               3.77076
 5.32016          2.61173               3.52791
 1.27538          1.79704               3.69467
  3.3705          1.76765               2.91812
 4.30039          2.74818                2.8623
  4.5337          3.97239               2.84491
 6.04033          3.00672               2.75938
 4.38553          3.73992               2.93889
 3.19392          2.86888                 2.774
0.828641          2.52012               2.62536
 1.72163          2.01967               2.44806
 3.29458          2.98017               2.43211
 2.52403          3.66452               2.49009
 3.66699           3.8424               2.43723
 3.61632          2.97193               2.48626
 8.21189          2.92507                2.5068
 2.21355          2.05444               3.01136
 2.72723          2.25223               2.49819
 6.91185          2.65374               2.47909
  7.1909          4.13726               2.99248
 6.48986          4.29747               3.14206
 6.82806          4.16423               3.05829
 4.04652          3.53064               3.35959
 3.11243          2.03647               2.85748
  2.5171          2.03228                2.6793
 2.75687          2.65182               2.59871
 6.87132          3.79279                2.5427
 4.43353          4.42485               2.99567
 4.92989          3.75903               2.72238
 3.08011           2.4104               2.74078
 1.10511          1.41326               2.61837
 1.37997          1.66678               2.43203
 6.66109           2.1173               2.41466
 13.9372           4.2446                2.7803
 26.0966          7.07464               4.92825
 18.2381          10.0231                9.8953
  8.3292          9.26931               7.90272
 2.68908           4.6492               6.50934
 3.12688          4.09763               5.69295
 23.8375          6.02919               4.98481
 50.1271          13.2263               11.6708
 25.8546          20.0603               24.8581
 9.41359          16.4818               18.5387
 16.7212          11.4972               11.5373
No description has been provided for this image
  [EURUSD H=1] Custom_KAN: blob_shape=(757, 1, 2), yt=ok, yp=ok
  [EURUSD H=1] Custom_KAN_LSTM: blob_shape=(757, 1, 2), yt=ok, yp=ok
[EURUSD H=1] actual=YES

[EURUSD H=1] Aligned series head (first 50 of 757 rows):
    Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
  0.739859         0.607651              0.268178
  0.743822         0.386594              0.270754
   1.02363         0.348778              0.271516
 0.0199331        0.0559565              0.272158
   0.52303         0.389376              0.270718
  0.355217         0.279801               0.27113
  0.417086         0.728095              0.270513
  0.266624         0.533505              0.270521
  0.312901         0.433327              0.269751
0.00443731       0.00655468              0.269215
  0.261753         0.665032              0.266524
  0.788937         0.235256              0.263815
  0.598342         0.725181              0.267831
  0.723261         0.571858              0.270252
  0.341589         0.427038              0.271354
 0.0137579        0.0370461              0.270738
   0.29405         0.328636               0.26881
  0.513047         0.423259              0.267813
  0.252839         0.359412              0.268583
  0.305402         0.569082              0.268119
  0.226156         0.236479              0.267583
0.00963577         0.038558              0.266194
 0.0784947         0.199045              0.261211
  0.199036         0.366473              0.247567
  0.171632         0.373385              0.218563
  0.201223         0.284305              0.186591
  0.353565         0.216012              0.170578
 0.0670925        0.0234985              0.157734
    0.2403         0.307552              0.156828
  0.443555         0.447809              0.158891
  0.418858         0.396097              0.152236
  0.378402         0.396133              0.167059
  0.644091         0.372788              0.189981
 0.0152947        0.0342811              0.243805
  0.272258         0.213232              0.256396
   0.23458         0.262449               0.25859
  0.249941         0.342523              0.256754
  0.705648         0.290315              0.253734
  0.293732         0.267186              0.261782
0.00988965        0.0121514              0.265145
  0.164063         0.121439              0.262569
  0.414405          0.45376              0.256583
  0.477121         0.366059              0.253374
  0.264156         0.387199              0.258174
  0.210154         0.219699              0.261001
 0.0136265        0.0190497              0.260359
  0.265973         0.214395              0.252331
  0.234507         0.450987              0.238072
  0.284101         0.380328              0.220388
  0.226239         0.263445              0.207918
No description has been provided for this image
  [GOLD H=1] Custom_KAN: blob_shape=(1107, 1, 2), yt=ok, yp=ok
  [GOLD H=1] Custom_KAN_LSTM: blob_shape=(1107, 1, 2), yt=ok, yp=ok
[GOLD H=1] actual=YES

[GOLD H=1] Aligned series head (first 50 of 1107 rows):
   Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
 0.983207         0.701758              0.273914
0.0725466        0.0823518              0.274401
 0.365228         0.481668              0.273761
 0.675363         0.720237              0.273848
  1.19577         0.618304              0.273905
 0.396614         0.384839              0.274294
 0.569428          0.38176              0.273966
0.0394832         0.734734              0.274082
 0.540289         0.624824              0.273594
 0.645375         0.658961              0.273806
 0.596069         0.745082              0.273801
 0.496734         0.599063              0.273755
 0.986594         0.583062              0.273703
0.0170706        0.0170588              0.273946
 0.388559         0.656164              0.273456
 0.616087          0.67779              0.273773
 0.603893          0.69176              0.273831
 0.739671         0.602324              0.273768
 0.832053         0.626191               0.27371
0.0110286        0.0288489              0.273781
 0.506412         0.585677              0.273382
 0.392005         0.649655               0.27367
  0.80787         0.665953              0.273761
 0.535457         0.655078              0.273809
 0.837414         0.648757              0.273604
0.0220968        0.0186792              0.273725
 0.498523         0.622294              0.273425
 0.472977         0.547215              0.273758
  1.61389         0.676914              0.273823
 0.641823         0.612712              0.274545
 0.600193         0.551905              0.275096
0.0226182        0.0473576              0.276448
 0.411008         0.669144              0.275287
 0.743739         0.528199              0.274043
 0.369762         0.724526              0.274035
  0.57285         0.725252              0.273748
  1.20635         0.538979              0.273812
0.0659951        0.0290663              0.274545
  1.33651          0.58866              0.273808
 0.970258         0.660859              0.275569
 0.553171         0.573039              0.279112
 0.193903         0.509898              0.282449
  1.26963         0.638561              0.281787
 0.222961        0.0525852              0.284728
 0.483934         0.638752              0.286731
   1.3403         0.674858              0.283263
  1.06602         0.643749              0.287496
 0.692459         0.412385              0.302146
 0.757949         0.467479              0.338778
0.0295473         0.118015              0.376959
No description has been provided for this image
  [SP500 H=1] Custom_KAN: blob_shape=(694, 1, 2), yt=ok, yp=ok
  [SP500 H=1] Custom_KAN_LSTM: blob_shape=(694, 1, 2), yt=ok, yp=ok
[SP500 H=1] actual=YES

[SP500 H=1] Aligned series head (first 50 of 694 rows):
   Actual  Custom_KAN_pred  Custom_KAN_LSTM_pred
   1.0351          1.58101               0.87748
  1.03491          1.63305              0.833625
 0.818223          1.19753              0.803877
0.0217497         0.274999               0.77482
 0.502587         0.502349              0.618308
  2.13502         0.584809              0.608759
 0.903485           1.3048              0.802115
 0.898743          1.37717              0.762571
 0.904151         0.859937              0.737796
0.0405714         0.212866              0.732258
 0.113815         0.571309              0.592389
  0.60793         0.851078              0.574063
  0.92855         0.811166              0.552216
 0.794209          0.83568              0.593118
 0.910428         0.587703              0.595495
0.0104435         0.139828              0.612359
 0.610551          0.32853              0.503915
 0.532918          1.02085              0.472238
 0.832645         0.996561              0.468645
 0.902649         0.934651              0.541841
 0.540703          0.87642              0.586514
0.0320152        0.0433334              0.569987
  0.44739         0.366324              0.399505
  1.17379         0.790062              0.364112
 0.784725         0.951741              0.552952
  1.19858         0.999232              0.584269
   3.1834         0.636494              0.632736
  1.95774          0.28704              0.993249
  5.83359           1.2685               1.04276
  1.91438          2.50608               1.26875
  2.82295          2.34888                 1.206
  1.97836          2.17837               1.29349
  1.67848          2.12045               1.27739
 0.231827         0.890928               1.22371
  1.99306          1.98042               1.05096
 0.595525          1.56959               1.04029
   2.0958          1.39722              0.932883
  1.44715           1.4858               1.00623
   1.8779          1.39319              0.987773
0.0836623         0.390448               1.00878
 0.723351           1.3374              0.887578
 0.463055         0.803606              0.835048
 0.331959          1.16216              0.765568
 0.355102          1.03631              0.660686
 0.359515          0.73861              0.606783
0.0698865        0.0532842              0.592606
 0.418851         0.588901              0.436196
 0.479569         0.540078              0.377433
 0.468942         0.475543              0.364782
 0.278331         0.642221              0.353861
No description has been provided for this image